#!/usr/bin/env python3
import argparse
import requests
import urllib3
import sys
import json
import re
import time
# The purpose of this script is to facilitate backup/restore of settings in RP4VMs
# The script exclusively uses the new RESTful API in RP4VMs 5.3
# Author - <NAME> <<EMAIL>>
# Version 1 - April 2021
# Copyright [2021] [<NAME>]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
urllib3.disable_warnings()
def get_args():
# Get command line args from the user
parser = argparse.ArgumentParser(
description='Script to backup and restore settings in RecoverPoint for VMs')
parser.add_argument('-s', '--server', required=True,
action='store', help='RP4VMs Plugin Server DNS name or IP')
parser.add_argument('-cfile', '--credsfile', required=True,
action='store', help='Path to credentials file')
parser.add_argument('-a', '--action', required=True, choices=['backup', 'restore'],
help='Choose to backup or restore settings')
parser.add_argument('-file', '--file', required=True,
action='store', help='Path to file for backup/restore')
parser.add_argument('-vc', "--vcenter", required=('new-vc' in sys.argv), action='store',
help='Provide vCenter DNS name or IP')
parser.add_argument('-cpairs', '--clusterpairs', required=False, action='store',
                        help='Provide RP4VMs cluster pairing in the format oldcl1,newcl1,oldcl2,newcl2')
parser.add_argument('-nmonitor', '--no-monitor', required=False, action='store_true', dest='nmonitor',
default=False, help='Optionally prevents monitoring of protection process')
args = parser.parse_args()
return args
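# Example invocations (illustrative only; the script name, host name and file
# paths below are hypothetical placeholders):
#   python rp4vms_settings.py -s rp4vms-plugin.example.com -cfile ./creds.txt -a backup -file ./rp4vms_backup.txt
#   python rp4vms_settings.py -s rp4vms-plugin.example.com -cfile ./creds.txt -a restore -file ./rp4vms_backup.txt -cpairs oldcl1,newcl1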
def init_rest_call(calltype, uri, user, password, payload=None):
# BETA refactor call to initiate rest calls
code = 200
headers = {'Content-Type': 'application/json'}
verify = False
try:
if calltype.lower() == "get":
response = requests.get(uri, headers=headers, auth=(user, password), verify=verify)
else:
            response = requests.request(calltype, uri, headers=headers, data=payload, auth=(user, password), verify=verify)
response.raise_for_status()
except requests.exceptions.ConnectionError as err:
print('Error Connecting to {}: {}'.format(uri, err))
sys.exit(1)
except requests.exceptions.Timeout as err:
        print('Connection timed out {}: {}'.format(uri, err))
sys.exit(1)
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
if (response.status_code != code):
raise Exception('Failed to query {}, code: {}, body: {}'.format(uri, response.status_code, response.text))
return response.json()
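# Minimal usage sketch for init_rest_call(); "/groups" is one of the suffixes
# used elsewhere in this script, while uri/user/password are assumed to come
# from the caller:
#   groups = init_rest_call("GET", uri + "/groups", user, password)
#   cg_names = [group["name"] for group in groups]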
def get_clusters(uri, user, password):
# Gets list of RP4VMs clusters
headers = {'Content-Type': 'application/json'}
suffix = "/rp-clusters"
uri += suffix
try:
response = requests.get(uri, headers=headers, auth=(user, password), verify=False)
response.raise_for_status()
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
if (response.status_code != 200):
raise Exception('Failed to query {}, code: {}, body: {}'.format(
uri, response.status_code, response.text))
return response.json()
def get_creds(credsfile, uri):
# Gets and validates credentials
file = open(credsfile, 'r')
credstring = file.read().rstrip()
file.close()
user, password = credstring.split(' ')
suffixurl = "/version"
uri += suffixurl
headers = {'Content-Type': 'application/json'}
try:
response = requests.get(uri, headers=headers, auth=(user, password),verify=False)
response.raise_for_status()
except requests.exceptions.ConnectionError as err:
print('Error Connecting to {}: {}'.format(uri, err))
sys.exit(1)
except requests.exceptions.Timeout as err:
        print('Connection timed out {}: {}'.format(uri, err))
sys.exit(1)
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
sys.exit(1)
if (response.status_code != 200):
raise Exception('Invalid credentials, code: {}, body: {}'.format(
response.status_code, response.text))
return user, password
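# The credentials file read by get_creds() is expected to hold a single line
# with the username and password separated by one space, e.g. (hypothetical values):
#   admin MySecretPassword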
def backup_general(uri, user, password, file):
# Backs up general config data like CGs, VMs, Group Sets, VCs, etc.
suffixlist = "/groups", "/vms", "/group-sets", "/vcs", "/licenses", "/rp-clusters"
headers = {'Content-Type': 'application/json'}
fileh = open(file, 'w')
for suffix in suffixlist:
nuri = uri + suffix
try:
response = requests.get(nuri, headers=headers, auth=(user, password), verify=False)
response.raise_for_status()
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
if (response.status_code != 200):
raise Exception('Failed to query {}, code: {}, body: {}'.format(
uri, response.status_code, response.text))
if (suffix == "/groups"):
groups = response.json()
fileh.write(response.request.method + ' ' + response.url + "\n")
fileh.write(str(response.json()))
fileh.write("\nEND\n")
fileh.close()
return groups
def get_all_copies(groups):
# Get all copies for all CGs
copies = []
for group in groups:
for copy in group["copyIds"]:
copies.append("/groups/{}/copies/{}".format(group["name"], copy))
return copies
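# Each entry produced by get_all_copies() is a relative API path of the form
# "/groups/<group name>/copies/<copy id>", e.g. (illustrative values):
#   "/groups/CG1/copies/1a2b3c"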
def load_json(data):
# Align data and convert to JSON
if (isinstance(data, dict)):
for copyid in data:
data[copyid] = data[copyid].replace("\'", "\"")
data[copyid] = data[copyid].replace("True", "true")
data[copyid] = data[copyid].replace("False", "false")
data[copyid] = json.loads(data[copyid])
else:
data = data.replace("\'", "\"")
data = data.replace("True", "true")
data = data.replace("False", "false")
data = json.loads(data)
return data
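# Illustrative example of what load_json() does: the backup file stores Python
# repr strings, which are massaged into valid JSON before parsing, e.g.
# (keys and values below are made up for illustration):
#   load_json("{'name': 'CG1', 'enabled': True}")
#   -> json.loads('{"name": "CG1", "enabled": true}') -> {'name': 'CG1', 'enabled': True}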
def backup_groups(uri, user, password, file, copies):
# Backs up per-CG and per-Copy specific settings
headers = {'Content-Type': 'application/json'}
suffixlist = "/", "/journals", "/re-ip"
payload = None
method = "GET"
fileh = open(file, 'a')
for copy in copies:
for suffix in suffixlist:
nuri = uri + copy + suffix
response = init_rest_call(method, nuri, user, password, payload)
fileh.write(method + ' ' + nuri + "\n")
fileh.write(str(response))
fileh.write("\nEND\n")
fileh.close()
return None
def validate_cluster_pairs(cpairs):
# Validates the cluster pairs parameter
if cpairs:
if (cpairs.find(',') < 1):
            print("Incorrect format of the cluster pairs parameter, exiting")
            sys.exit(1)
else:
cpairs = cpairs.split(',')
if (len(cpairs) % 2 != 0):
print("Incorrect format of the cluster pairs parameter, existing")
sys.argv(1)
else:
return cpairs
else:
return None
def extract_backup_data(file):
# Extract backup information from backup file
fileh = open(file, 'r')
data = {}
copies = {}
copyparams = 'copies', 'journals', 're-ip'
for param in copyparams:
copies[param] = {}
for line in fileh:
if line.startswith("GET"):
check = re.search("/v1/(.*)$", line).groups()[0]
if "copies" in check:
                match = re.search(r"/groups/(.*)/copies/(\w+)/(.*?)$", line)
group, copyid = match.groups()[0], match.groups()[1]
if not match.groups()[2]:
check = "copies"
else:
check = match.groups()[2]
elif line.startswith("END"):
check = None
else:
if check in copyparams:
copies[check][copyid] = line
else:
data[check] = line
fileh.close()
return data, copies
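# Sketch of the backup file layout that extract_backup_data() parses, as written
# by backup_general()/backup_groups() (URL and payload are illustrative):
#   GET <base-uri>/v1/groups
#   [{'id': '...', 'name': '...', 'copyIds': ['...']}]
#   END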
def merge_group_data(groups, vms, copysettings, journals, reip):
    # Add protected VMs to their respective CG
for group in groups:
vmlist = []
prodvmcounter = 0
copylist = []
for vm in vms:
if vm["groupId"] == group["id"]:
vmlist.append(vm)
group.update({'vms': [vmlist]})
for vm in vmlist:
if "PRODUCTION" in vm["role"]:
prodvmcounter += 1
group.update({'prodvmcount': prodvmcounter})
for copyid in journals:
if (group["id"] in copyid):
group.update({'journals': journals[copyid]})
for copyid in reip:
if (group["id"] in copyid):
group.update({'re-ip': reip[copyid]})
for copyid in copysettings:
if (group["id"] in copyid):
copylist.append(copysettings[copyid])
group.update({'copies': '{}'.format(json.dumps(copylist))})
return groups
def check_rep_topology(group):
if len(group["copyIds"]) == 2:
return True
else:
return False
def determine_rpcluster(uri, user, password, group, cpairs):
# Gets the desired RP4VMs cluster
clusters = get_clusters(uri, user, password)
if cpairs:
        for counter in range(0, len(cpairs), 2):
            if group["prodRpClusterName"] == cpairs[counter]:
                rpcluster = cpairs[counter + 1]
else:
if clusters[0]["isRegistered"]:
rpcluster = clusters[0]["name"]
else:
rpcluster = clusters[1]["name"]
return rpcluster
def get_candidates(uri, user, password, name):
# Gets candidate VMs for replication
suffixurl = "/vms/protect/candidates"
uri += suffixurl
headers = {'Content-Type': 'application/json'}
filter = ''
bfilter = filter
if name != None:
filter += name
params = {'vms': filter}
try:
response = requests.get(uri, headers=headers, params=params, auth=(user, password), verify=False)
response.raise_for_status()
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
if (response.status_code != 200):
raise Exception('Failed to query {}, code: {}, body: {}'.format(
uri, response.status_code, response.text))
exactresult = response.json()
vms = []
if (response.status_code == 200 and response.json() == [] and name != None):
params = {'vms': bfilter}
try:
response = requests.get(uri, headers=headers, params=params, auth=(user, password), verify=False)
response.raise_for_status()
except requests.exceptions.RequestException as err:
print("The call {} {} failed with exception:{}".format(response.request.method, response.url, err))
if (response.status_code != 200):
raise Exception('Failed to query {}, code: {}, body: {}'.format(
uri, response.status_code, response.text))
for vm in response.json():
if re.match(name.lower(),vm['name'].lower()):
vms.append(vm)
if vms:
return vms
return exactresult
def determine_journal(group):
# Checks if non-default journal size is used for replica copy
totaljournal = 0
for journal in group["journals"]:
if journal["copyId"] != group["prodCopyId"]:
totaljournal += journal["sizeInMB"]
return totaljournal
def exclude_disks(defaults, group):
# Determines whether to exclude disks or not
excludeddisks = {}
for vm in group["vms"][0]:
for disk in vm["vmdks"]:
if not disk["included"]:
excludeddisks[vm["id"]] = []
excludeddisks[vm["id"]].append(disk["path"])
if len(excludeddisks) == 0:
return defaults
else:
if len(group["vms"] == 1):
for disk in defaults["protectedVmdks"]:
for excludeddisk in excludeddisk[defaults["vm"]]:
if disk == excludeddisk:
defaults["protectedVmdks"].remove(disk)
else:
for vms in defaults["vms"]:
for disk in vm["protectedVmdks"]:
for excludeddisk in excludeddisk[vm["vm"]]:
if disk == excludeddisk:
vm["protectedVmdks"].remove(disk)
return defaults
def get_defaults(uri, user, password, group, rpcluster):
# Gets recommended replication parameters
suffixurl = "/vms/protect/defaults"
suffixurl2 = "/vms/protect-to-single-group/defaults"
vms = group["vms"]
headers = {'Content-Type': 'application/json'}
if group["prodvmcount"] == 0:
print("no VMs found for group: {}, exiting".format(group["name"]))
sys.exit(1)
else:
vmlist = []
vmdict = {}
for vm in vms[0]:
if "PRODUCTION" | |
import json
import pathlib
import urllib3
import dash
from dash.dependencies import Input, Output, State, ALL, ClientsideFunction
from dash.exceptions import PreventUpdate
from dash.dash import no_update
import dash_html_components as html
import dash_bootstrap_components as dbc
from flask import flash, get_flashed_messages
from flask_caching import Cache
from data import dev
import preprocessing
from settings import (
SECRET_KEY,
DB_URL,
DEBUG,
MANAGE_DB,
SKIP_TS,
SC_FILTERS,
USE_DUMMY_DATA,
CACHE_CONFIG,
MAX_WARNINGS,
MAX_INFOS,
)
import scenario
import graphs
from models import db, get_model_options, Filter, Colors, Labels
urllib3.disable_warnings()
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
# Initialize app
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=4.0"},
],
external_stylesheets=[dbc.themes.BOOTSTRAP],
)
server = app.server
server.secret_key = SECRET_KEY
# Database
server.config["SQLALCHEMY_DATABASE_URI"] = DB_URL
server.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(server)
# Cache
cache = Cache()
cache.init_app(server, config=CACHE_CONFIG)
# Layout
if not MANAGE_DB:
from layout import (
DEFAULT_LAYOUT,
IMPRINT_LAYOUT,
PRIVACY_LAYOUT,
get_layout,
get_graph_options,
get_error_and_warnings_div,
)
app.layout = DEFAULT_LAYOUT
app.validation_layout = html.Div(
[
DEFAULT_LAYOUT,
get_layout(app, scenarios=scenario.get_scenarios()),
IMPRINT_LAYOUT,
PRIVACY_LAYOUT,
]
)
# Multiple pages
@app.callback(
dash.dependencies.Output("page-content", "children"),
[dash.dependencies.Input("url", "pathname")],
)
def display_page(pathname):
if pathname == "/imprint":
return IMPRINT_LAYOUT
elif pathname == "/privacy":
return PRIVACY_LAYOUT
else:
return get_layout(app, scenarios=scenario.get_scenarios())
@cache.memoize()
def get_scenario_data(scenario_id, table):
app.logger.info(f"Loading scenario data #{scenario_id} (not cached)...")
if USE_DUMMY_DATA:
return dev.get_dummy_data(scenario_id, table)
return scenario.get_scenario_data(scenario_id, table)
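# Usage sketch (illustrative): the table name below matches the one used by the
# scalar graph callback further down; repeated calls with the same arguments are
# served from the cache via cache.memoize().
#   data = get_scenario_data(1, table="oed_scalars")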
@cache.memoize()
def get_multiple_scenario_data(*scenario_ids, table):
app.logger.info("Merging scenario data (not cached)...")
scenarios = [get_scenario_data(scenario_id, table) for scenario_id in scenario_ids]
merged = scenario.merge_scenario_data(scenarios)
app.logger.info("Merged scenario data")
return merged
@cache.memoize()
def get_scenario_filters(scenario_id):
app.logger.info(f"Loading scenario data #{scenario_id} (not cached)...")
if USE_DUMMY_DATA:
return dev.get_dummy_filters(scenario_id)
return scenario.get_scenario_filters(scenario_id)
@cache.memoize()
def get_multiple_scenario_filters(*scenario_ids):
app.logger.info("Merging scenario data (not cached)...")
scenarios = [
get_scenario_filters(scenario_id) for scenario_id in scenario_ids
]
merged = scenario.merge_scenario_data(scenarios)
app.logger.info("Merged scenario data")
return merged
@app.callback(
Output(component_id="dd_scenario", component_property="options"),
Input("scenario_reload", "n_clicks"),
)
def reload_scenarios(_):
scenarios = scenario.get_scenarios()
return [
{"label": f"{sc['id']}, {sc['scenario']}, {sc['source']}", "value": sc["id"],}
for sc in scenarios
]
app.clientside_callback(
ClientsideFunction(namespace="clientside", function_name="update_refresh_elements"),
Output(component_id="refresh_scalars", component_property="className"),
[
Input("dd_scenario", "value"),
Input(component_id="order_by", component_property="value"),
Input(component_id="aggregation_group_by", component_property="value"),
Input({"name": ALL, "type": "filters"}, "value"),
Input({"name": ALL, "type": "unit-dropdown"}, "value"),
Input({"name": ALL, "type": "graph_scalars_option"}, "value"),
Input("load_filters", "value"),
Input("load_colors", "value"),
Input("load_labels", "value"),
],
prevent_initial_call=True,
)
@app.callback(
[
Output(component_id="load_filters", component_property="options"),
Output(component_id="save_filters_name", component_property="value"),
],
Input("save_filters", "n_clicks"),
[
State(component_id="save_filters_name", component_property="value"),
State(component_id="graph_scalars_options", component_property="children"),
State(component_id="graph_timeseries_options", component_property="children"),
State(component_id="order_by", component_property="value"),
State(component_id="aggregation_group_by", component_property="value"),
State(component_id="filters", component_property="children"),
],
)
def save_filters(
_,
name,
graph_scalars_options,
graph_timeseries_options,
order_by,
agg_group_by,
filter_div,
):
if not name:
raise PreventUpdate
filters = preprocessing.extract_filters("scalars", filter_div)
filters["order_by"] = order_by
filters["agg_group_by"] = agg_group_by
scalar_graph_options = preprocessing.extract_graph_options(graph_scalars_options)
ts_graph_options = preprocessing.extract_graph_options(graph_timeseries_options)
db_filter = Filter(
name=name,
filters=filters,
scalar_graph_options=scalar_graph_options,
ts_graph_options=ts_graph_options,
)
db.session.add(db_filter)
db.session.commit()
return get_model_options(Filter), ""
@app.callback(
[
Output(component_id="load_colors", component_property="options"),
Output(component_id="save_colors_name", component_property="value"),
Output(component_id="colors_error", component_property="children"),
],
Input("save_colors", "n_clicks"),
[
State(component_id="save_colors_name", component_property="value"),
State(component_id="colors", component_property="value"),
],
)
def save_colors(_, name, str_colors):
if not name:
raise PreventUpdate
try:
colors = json.loads(str_colors)
except json.JSONDecodeError as je:
flash(
f"Could not read color mapping. Input must be valid JSON. (Error: {je})",
"error",
)
return get_model_options(Colors), "", show_logs()
db_colors = Colors(name=name, colors=colors,)
db.session.add(db_colors)
db.session.commit()
return get_model_options(Colors), "", show_logs()
@app.callback(
[
Output(component_id="load_labels", component_property="options"),
Output(component_id="save_labels_name", component_property="value"),
Output(component_id="labels_error", component_property="children"),
],
Input("save_labels", "n_clicks"),
[
State(component_id="save_labels_name", component_property="value"),
State(component_id="labels", component_property="value"),
],
)
def save_labels(_, name, str_labels):
if not name:
raise PreventUpdate
try:
labels = json.loads(str_labels)
except json.JSONDecodeError as je:
flash(
f"Could not read labels. Input must be valid JSON. (Error: {je})", "error"
)
return get_model_options(Labels), "", show_logs()
db_labels = Labels(name=name, labels=labels,)
db.session.add(db_labels)
db.session.commit()
return get_model_options(Labels), "", show_logs()
# @app.callback(
# [
# Output(component_id="view-dashboard_sclar", component_property="className"),
# Output(component_id="view-dashboard-data", component_property="className"),
# ],
# [
# Input("view-dashboard", "n_clicks"),
# Input("view-dashboard-data", "n_clicks"),
# ],
# prevent_initial_call=True,
# )
# def show_data(_, __):
# ctx = dash.callback_context
# if "view-dashboard-data" in ctx.triggered[0]["prop_id"]:
# return "view view--dashboard", "view view--dashboard-data active"
# else:
# return "view view--dashboard active", "view view--dashboard-data"
@app.callback(
[
Output(component_id="graph_scalars_plot_switch", component_property="value"),
Output(component_id="graph_timeseries_plot_switch", component_property="value"),
Output(component_id="order_by", component_property="value"),
Output(component_id="aggregation_group_by", component_property="value"),
]
+ [
Output(
component_id={"name": filter_, "type": "filter-dropdown"},
component_property="value",
)
for filter_ in SC_FILTERS
]
+ [Output(component_id="save_load_errors", component_property="children")],
Input("load_filters", "value"),
State(component_id="dd_scenario", component_property="value"),
prevent_initial_call=True,
)
def load_filters(name, scenarios):
if not name:
raise PreventUpdate
if not scenarios:
flash("No scenario selected - cannot load filters without scenario", "error")
return (
no_update,
no_update,
no_update,
*([no_update] * len(SC_FILTERS)),
show_logs(),
)
db_filter = Filter.query.filter_by(name=name).first()
filters = [db_filter.filters.get(filter_, None) for filter_ in SC_FILTERS]
flash("Successfully loaded filters", "info")
return (
db_filter.scalar_graph_options["type"],
db_filter.ts_graph_options["type"],
db_filter.filters.get("order_by", []),
db_filter.filters["agg_group_by"],
*filters,
show_logs(),
)
@app.callback(
Output(component_id="colors", component_property="value"),
Input("load_colors", "value"),
prevent_initial_call=True,
)
def load_colors(name):
if not name:
raise PreventUpdate
db_colors = Colors.query.filter_by(name=name).first()
return json.dumps(db_colors.colors)
@app.callback(
Output(component_id="labels", component_property="value"),
Input("load_labels", "value"),
prevent_initial_call=True,
)
def load_labels(name):
if not name:
raise PreventUpdate
db_labels = Labels.query.filter_by(name=name).first()
return json.dumps(db_labels.labels)
@app.callback(
[
Output(
component_id={"name": filter_, "type": "filter-dropdown"},
component_property="options",
)
for filter_ in SC_FILTERS
],
[Input(component_id="dd_scenario", component_property="value")],
)
def load_scenario(scenarios):
if scenarios is None:
raise PreventUpdate
scenarios = scenarios if isinstance(scenarios, list) else [scenarios]
filters = get_multiple_scenario_filters(*scenarios)
app.logger.info("Data successfully loaded")
return preprocessing.get_filter_options(filters)
@app.callback(
[Output(component_id="graph_scalars_options", component_property="children")],
[
Input(component_id="graph_scalars_plot_switch", component_property="value"),
Input("load_filters", "value"),
],
prevent_initial_call=True,
)
def toggle_scalar_graph_options(plot_type, name):
# Have to use "callback_context" as every component can only have one output callback
ctx = dash.callback_context
if ctx.triggered[0]["prop_id"] == "graph_scalars_plot_switch.value":
graph_scalar_options = get_graph_options("scalars", plot_type)
else:
if not name:
raise PreventUpdate
db_filter = Filter.query.filter_by(name=name).first()
graph_scalar_options = get_graph_options(
"scalars",
db_filter.scalar_graph_options["type"],
db_filter.scalar_graph_options["options"],
)
return (graph_scalar_options,)
@app.callback(
[Output(component_id="graph_timeseries_options", component_property="children")],
[
Input(component_id="graph_timeseries_plot_switch", component_property="value"),
Input("load_filters", "value"),
],
prevent_initial_call=True,
)
def toggle_timeseries_graph_options(plot_type, name):
# Have to use "callback_context" as every component can only have one output callback
ctx = dash.callback_context
if ctx.triggered[0]["prop_id"] == "graph_timeseries_plot_switch.value":
graph_timeseries_options = get_graph_options("timeseries", plot_type)
else:
if not name:
raise PreventUpdate
db_filter = Filter.query.filter_by(name=name).first()
graph_timeseries_options = get_graph_options(
"timeseries",
db_filter.ts_graph_options["type"],
db_filter.ts_graph_options["options"],
)
return (graph_timeseries_options,)
@app.callback(
[
Output(component_id="graph_scalars", component_property="figure"),
Output(component_id="table_scalars", component_property="data"),
Output(component_id="table_scalars", component_property="columns"),
Output(component_id="graph_scalars_error", component_property="children"),
Output(component_id="tab_scalars_error", component_property="labelClassName"),
Output(component_id="view-dashboard_scalars", component_property="className"),
Output(component_id="view-dashboard-data_scalars", component_property="className"),
Output(component_id="table_div_scalars", component_property="style"),
],
[
Input(component_id="refresh_scalars", component_property="n_clicks"),
Input(component_id="view-dashboard_scalars", component_property="n_clicks"),
Input(component_id="view-dashboard-data_scalars", component_property="n_clicks"),
],
[
State(component_id="view-dashboard-data_scalars", component_property="className"),
State(component_id="units", component_property="children"),
State(component_id="graph_scalars_options", component_property="children"),
State(component_id="filters", component_property="children"),
State(component_id="colors", component_property="value"),
State(component_id="labels", component_property="value"),
State(component_id="order_by", component_property="value"),
State(component_id="aggregation_group_by", component_property="value"),
State(component_id="dd_scenario", component_property="value"),
],
prevent_initial_call=True,
)
def scalar_graph(
_,
__,
___,
show_data_cls,
units_div,
graph_scalars_options,
filter_div,
colors,
labels,
order_by,
agg_group_by,
scenarios,
):
if scenarios is None:
raise PreventUpdate
# Check if data shall be shown:
show_data = show_data_cls and "active" in show_data_cls
data_div_cls = no_update, no_update, no_update
ctx = dash.callback_context
if "view-dashboard-data" in ctx.triggered[0]["prop_id"]:
if show_data:
raise PreventUpdate
show_data = True
data_div_cls = "view view--dashboard", "view view--dashboard-data active", {}
elif "view-dashboard" in ctx.triggered[0]["prop_id"]:
if not show_data:
raise PreventUpdate
show_data = False
data_div_cls = "view view--dashboard active", "view view--dashboard-data", {"display": "none"}
data = get_multiple_scenario_data(*scenarios, table="oed_scalars")
filters = preprocessing.extract_filters("scalars", filter_div)
units = preprocessing.extract_unit_options(units_div)
graph_options = preprocessing.extract_graph_options(graph_scalars_options)
colors = preprocessing.extract_colors(colors)
graph_options["options"]["color_discrete_map"] = colors
labels = preprocessing.extract_labels(labels)
try:
preprocessed_data = preprocessing.prepare_scalars(
data, order_by, agg_group_by, units, filters, labels
)
except preprocessing.PreprocessingError:
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
if preprocessed_data.empty:
flash("No data for current filter settings", "warning")
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
try:
fig = graphs.get_scalar_plot(preprocessed_data, graph_options)
except graphs.PlottingError:
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
if show_data:
columns = [{"name": i, "id": i} for i in preprocessed_data.columns]
data_table = preprocessed_data.applymap(str).to_dict("records")
else:
columns = []
data_table = []
log_div, log_level = show_logs()
return fig, data_table, columns, log_div, log_level, *data_div_cls
@app.callback(
[
Output(component_id="graph_timeseries", component_property="figure"),
Output(component_id="table_timeseries", component_property="data"),
Output(component_id="table_timeseries", component_property="columns"),
Output(component_id="graph_timeseries_error", component_property="children"),
Output(component_id="tab_timeseries_error", component_property="labelClassName"),
Output(component_id="view-dashboard_timeseries", component_property="className"),
Output(component_id="view-dashboard-data_timeseries", component_property="className"),
Output(component_id="table_div_timeseries", component_property="style"),
],
[
Input(component_id="refresh_timeseries", component_property="n_clicks"),
Input(component_id="view-dashboard_timeseries", component_property="n_clicks"),
Input(component_id="view-dashboard-data_timeseries", component_property="n_clicks"),
],
[
State(component_id="view-dashboard-data_timeseries", component_property="className"),
State(component_id="units", component_property="children"),
State(component_id="graph_timeseries_options", component_property="children"),
State(component_id="filters", component_property="children"),
State(component_id="colors", component_property="value"),
State(component_id="labels", component_property="value"),
State(component_id="order_by", component_property="value"),
State(component_id="aggregation_group_by", component_property="value"),
State(component_id="dd_scenario", component_property="value"),
],
prevent_initial_call=True,
)
def timeseries_graph(
_,
__,
___,
show_data_cls,
units_div,
graph_timeseries_options,
filter_div,
colors,
labels,
order_by,
agg_group_by,
scenarios,
):
if scenarios is None or SKIP_TS:
raise PreventUpdate
# Check if data shall be shown:
show_data = show_data_cls and "active" in show_data_cls
data_div_cls = no_update, no_update, no_update
ctx = dash.callback_context
if "view-dashboard-data" in ctx.triggered[0]["prop_id"]:
if show_data:
raise PreventUpdate
data_div_cls = "view view--dashboard", "view view--dashboard-data active", {}
elif "view-dashboard" in ctx.triggered[0]["prop_id"]:
if not show_data:
raise PreventUpdate
show_data = False
data_div_cls = "view view--dashboard active", "view view--dashboard-data", {"display": "none"}
data = get_multiple_scenario_data(*scenarios, table="oed_timeseries")
filters = preprocessing.extract_filters("timeseries", filter_div)
units = preprocessing.extract_unit_options(units_div)
graph_options = preprocessing.extract_graph_options(graph_timeseries_options)
colors = preprocessing.extract_colors(colors)
graph_options["options"]["color_discrete_map"] = colors
labels = preprocessing.extract_labels(labels)
try:
preprocessed_data = preprocessing.prepare_timeseries(
data, order_by, agg_group_by, units, filters, labels
)
except preprocessing.PreprocessingError:
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
if preprocessed_data.empty:
flash("No data for current filter settings", "warning")
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
try:
fig = graphs.get_timeseries_plot(preprocessed_data, graph_options)
except graphs.PlottingError:
log_div, log_level = show_logs()
return graphs.get_empty_fig(), [], [], log_div, log_level, *data_div_cls
if show_data:
columns = [{"name": i, "id": i} for i in preprocessed_data.columns]
data_table = preprocessed_data.applymap(str).to_dict("records")
else:
columns = []
data_table = []
log_div, log_level = show_logs()
return fig, data_table, columns, log_div, log_level, *data_div_cls
def show_logs():
errors = get_flashed_messages(category_filter=["error"])
warnings = get_flashed_messages(category_filter=["warning"])
if len(warnings) > MAX_WARNINGS:
warnings = warnings[:MAX_WARNINGS]
warnings.append(
f"Too many warnings (>{MAX_WARNINGS}) - Skipping further warnings..."
)
infos = get_flashed_messages(category_filter=["info"])
if len(infos) > MAX_INFOS:
infos = infos[:MAX_INFOS]
infos.append(f"Too | |
# lectures/Python/9_Data_structures/PiRaP-2020-lecture-9.py
# auxiliary function for cleaning the workspace
def clear_all():
gl = globals().copy()
for var in gl:
if var[0] == '_': continue
if 'func' in str(globals()[var]): continue
if 'module' in str(globals()[var]): continue
del globals()[var]
# --------------------------------------------------------------------
# Basics
# --------------------------------------------------------------------
# Importing modules
# load modules
import math # system module
#import mylib # own module
y = math.sin(math.pi)
#z = mylib.myfun()
# import single element
from math import pi
y = math.sin(pi)
# import everything
from math import *
y = sin(pi)
# alias
import math as m
y = m.sin(m.pi)
# --------------------------------------------------------------------
# Functions
# no return value
def display(x):
print(x)
# return value
def sqr(x):
return x * x
# function that does nothing
def doNothing():
pass
# conditional return
def geoMean(x,y):
if x < 0 or y < 0: return None
else: return math.sqrt(x * y)
b = display('abc') # b is None
p = sqr(2) # p is 4
q = geoMean(2, 8) # q is 4
r = geoMean(-2, 8) # r is None
# default and named arguments
def fun(x, y=10, s='abc'):
print(x,y,s)
fun(0) # fun(0, 10, 'abc')
fun(1, 3.14, 'xyz')
fun(2, s='PS') # named argument - fun(2, 10, 'PS')
fun(y=4, x=1) # named arguments - fun(1, 4, 'abc')
fun(5, x=1) # error: x passed twice
def fun2(x=1, y): # error: non-default follows default
pass
# --------------------------------------------------------------------
# Iterations
# Iterate over collection
V = ['a', 'b', 'c'] # list literal - creates a list from values
for e in V:
print(e)
# Iterate over range (right excluded)
for i in range(0,10): # [0, 1, ... 9]
print(i)
# while loop
i = 10
while i > 0:
print(i)
i -= 1
# 100, 95, ...,5
for i in range(100, 0, -5): print(i)
# reversed collection
for e in reversed(V): print(e)
# break, continue, loop-else
for n in range(2, 10):
for x in range(2, n):
if n % x == 0:
print(f'{n} = {x} * {n//x}')
break
else: # concerns for loop - executed when loop exited normally (not by break)
print ('{} is a prime number'.format(n))
# --------------------------------------------------------------------
# String type
#
# - Immutable - you cannot alter the string, you can only create
# a new one on the basis of the existing one.
# - Assignment operator makes a copy.
# - Indexing using slices: [begin:end:step].
# - 0-based, end is an element after the slice.
clear_all()
# Accessing elements
s = 'Dog'
c = s[1] # 'o' - get single character
s0 = s[0:len(s)] # 'Dog' - copy entire string
s1 = s[1:] # 'og' - copy all beside the first char
s2 = s[:len(s)-1] # 'Do' - copy all beside the last char
s3 = s[:-1] # 'Do' - -||-
s4 = s[::2] # 'Dg' - copy every second character
s[1] = 'a' # error: immutable
s = s[0] + 'a' + s[2] # 'Dag'
t = s * 2 # 'DagDag'
# Capitalization
s = 'Piotr'
s = s.lower() # 'piotr'
flag = s.islower() # True
s = s.capitalize() # 'Piotr'
# Finding substrings
s = 'Ala ma kota'
print(s.find('a')) # 2 - index of the first match
print(s.find('ko')) # 7 - index of the first match
print(s.find('a', 3)) # 5 - index of the first match starting from 3
print(s.rfind('a')) # 10 - index of the last match
print(s.find('q')) # -1 - no match
f1 = 'la' in s # True
f2 = 'abc' in s # False
# Checking characters
s = '1234'
print(s.isalnum())
print(s.isnumeric())
print(s.isalpha())
print('abc'.isalpha())
# Multi-line strings
a = 'first line\nsecond line' # escape characters
# Triple quoute string
b = '''first line
second line'''
# Splitting
s = '192.168.0.0'
ip = s.split('.') # create list of strings
lines = a.splitlines()
# --------------------------------------------------------------------
# List type
#
# - Mutable - you can alter elements of the container.
# - Assignment operator makes an alias.
# - Can store elements of any type.
# - Indexing using slices: [begin:end:step].
# - 0-based, end is an element after the slice.
clear_all()
# Accessing elements
t = [1, 3.14, True, [2, 'xyz']] # list literal - elements may have different types
print(t)
x = t[1] # x = 3.14
t[2] = False # ok – mutable type
t[3][1] = 'abc' # ok
1 in t # True
2 in t # False – it is in sublist
# Aliasing
p = [1, 2, 3]
q = p # q points to p (aliasing)
q[1] = 10 # modify p as well
print(p, '-', q) # 1 10 3 - 1 10 3
r = p[:] # explicit copy
r[0:2] = ['a', 'b']
print(p, '-', r) # 1 10 3 - a b 3
# Adding elements
t = list() # empty list (alternative syntax)
t += ['a', 'b', 'c'] # modify - extend with sublist
t.extend(['e', 'f']) # modify - extend with sublist
t.append('d') # modify - add an element (character)
t.append(['g']) # modify - add an element (sublist)
t = t + ['k', 'i'] # create new and store in t
u = t * 2 # create new
# Removing elements
x = t.pop(1) # remove 'b' and assign result to x
del t[1] # remove 'c'
del t[4:6] # remove ['g'] and 'k'
t.append('a')
t.remove('a') # remove first 'a'
# Sorting elements
t.sort()
# --------------------------------------------------------------------
# Iterators
#
# - objects for iteration over collections
# - allow creating iterable collections
I = iter([1, 2, 3]) # list iterator
print(I.__next__()) # 1
print(I.__next__()) # 2
print(I.__next__()) # 3
print(I.__next__()) # exception
# loop using iterator
I = iter([1, 2, 3]) # list iterator
for i in I:
print(i)
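# A user-defined iterable (minimal sketch): implementing __iter__ and __next__
# lets objects of the class be used in a for loop, just like the built-in
# collections above.
class Countdown:
    def __init__(self, start):
        self.current = start
    def __iter__(self):
        return self            # the object is its own iterator
    def __next__(self):
        if self.current <= 0:
            raise StopIteration
        self.current -= 1
        return self.current + 1

for n in Countdown(3):
    print(n)                   # 3, 2, 1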
# --------------------------------------------------------------------
# Generators
#
# - allow generation of values,
# - lazy evaluation (values generated when needed),
# - are iterable objects,
# define generator object that creates characters from given range
def genChars(c1, c2):
for c in range(ord(c1), ord(c2)+1):
yield chr(c) # preserve a state
gen = genChars('a','z')
# in a loop
for x in gen:
print(x)
# does nothing - generator can be iterated through only once
for x in gen:
print(x)
# the range function also returns a lazy iterable (a range object, not strictly
# a generator; in Python 2, xrange should be used for the same effect)
clear_all()
N = 10000000
G = range(0,N)
print(sum(G)) # passing iterable object to the function
# unpacking generator to list
L = [*G]
M = [G] # a list with generator as an element
# sum of squares of even numbers from the list
V = [0,12,4,6,242,7,9]
s = 0
for x in V:
if x % 2 == 0:
s = s + x*x
print(s)
# list
L = [x*x for x in V if x % 2 == 0]
print(sum(L))
print(L[2])
# generator – w/o list
gen = (x*x for x in V if x % 2 == 0)
print(sum(gen))
print(gen[2]) # error - cannot use subscript for generators
# performance comparison - list vs generator
import time
N = 100000000
t = time.perf_counter()
L = [x/N for x in range(0,N)]
print(sum(L))
print("List:", time.perf_counter() - t)
t = time.perf_counter()
G = (x/N for x in range(0,N))
print(sum(G))
print("Generator:", time.perf_counter() - t)
# --------------------------------------------------------------------
# Tuple type
#
# - Similar to list, but immutable.
clear_all()
t1 = 1, 2, 3 # declaration
t2 = ('a', 'c', 'd') # parentheses () are optional
t3 = 'q', # one-element tuple
t4 = t2[1:3] # ('c', 'd')
t5 = t4, 'e' # (('c', 'd'), 'e')
t6 = t4 + t3 # ('c', 'd', 'q')
t7 = t4 + ('a',) # ('c', 'd', 'a')
t7[1] = 4; # error – immutable
t1,t2 = t2,t1 # value swap
# iteration over collection: index + value
X = ['a', 'b', 'c']
for i, x in enumerate(X):
print(f'X[{i}]={x}')
# iteration over several collections (adjust to the shortest)
Y = [0.4, 11, -10]
Z = [True, False, False, True]
for x, y, z in zip(X, Y, Z):
print(x, y, z)
# returning multiple values from a function
def getFirstLast(L):
return L[0], L[-1]
L = [1, 2, 3, 4, 5, 6, 7, 8]
f, l = getFirstLast(L)
f_l = getFirstLast(L)
print(f_l) # (1,8)
# function with variable number of parameters
def add(*args):
s = 0
for e in args:
s += e
return s
print(add(1, 2, 3, 4, 5)) # sum the parameters
t = (9, 8, 7)
L = [*t] # "unpacking" a tuple to list
# sorting using multiple criteria
names = ['Stan', 'Peter' , 'Alice']
salaries = [3000, 2000, 2000]
zipped = [*zip(salaries, names)] # zip is an iterable object - it has to be unpacked to list
zipped.sort() # sort list
print(zipped) # [(2000, 'Alice'), (2000, 'Peter'), (3000, 'Stan')]
#
#
# Powerpoint time...
#
#
# --------------------------------------------------------------------
# Set type
#
# - Stores keys (immutable),
# - implemented as hashtable (keys are not sorted)
clear_all()
S = set()
S.add(1)
S.add('pqr')
S.add([33]) # error, lists are mutable - cannot be stored in set
S.add((35,)) # ok, tuples are immutable
S.add(1) # already exist
S.remove('pqr') # ok, exists
S.remove(55) # error, doesn't exist
start = 1800
end = 2200
A = {x for x in range(start,end) if x % 4 == 0} # divisible by 4 (set comprehension)
B = {x for x in range(start,end) if x % 100 == 0} # divisible by 100
C = {x for x in range(start,end) if x % 400 == 0} # divisible by 400
# ace-ecosystem/ace2-core
# vim: ts=4:sw=4:et:cc=120
#
import asyncio
import os
import os.path
import tempfile
import shutil
import ace.analysis
from ace.analysis import RootAnalysis, Observable, AnalysisModuleType, Analysis
from ace.logging import get_logger
from ace.constants import EVENT_ANALYSIS_ROOT_COMPLETED
from ace.system.distributed import app
from ace.system.events import EventHandler, Event
from ace.module.base import AnalysisModule, MultiProcessAnalysisModule
from ace.module.manager import AnalysisModuleManager, CONCURRENCY_MODE_PROCESS, CONCURRENCY_MODE_THREADED
from tests.systems import RemoteACETestSystem
import pytest
@pytest.mark.asyncio
@pytest.mark.system
async def test_basic_analysis_async(manager):
# basic analysis module
class TestAsyncAnalysisModule(AnalysisModule):
# define the type for this analysis module
type = AnalysisModuleType("test", "")
# define it as an async module
async def execute_analysis(self, root, observable, analysis):
analysis.set_details({"test": "test"})
analysis.add_observable("test", "hello")
return True
# create an instance of it
module = TestAsyncAnalysisModule()
# register the type to the core
await manager.system.register_analysis_module_type(module.type)
# submit a root for analysis so we create a new job
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
manager.add_module(module)
await manager.run_once()
# check the results in the core
root = await manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
analysis = observable.get_analysis(module.type)
assert analysis
assert await analysis.get_details() == {"test": "test"}
assert analysis.observables[0] == ace.analysis.Observable("test", "hello")
class TestMultiProcessAnalysisModule(AnalysisModule):
__test__ = False
# define the type for this analysis module
type = AnalysisModuleType("test", "")
# mark it as multi process
is_multi_process: bool = False
async def execute_analysis(self, root, observable, analysis):
analysis.set_details({"test": "test"})
analysis.add_observable("test", "hello")
return True
@pytest.mark.asyncio
@pytest.mark.system
async def test_basic_analysis_sync(manager):
# create an instance of it
module = TestMultiProcessAnalysisModule()
# register the type to the core
await manager.system.register_analysis_module_type(module.type)
# submit a root for analysis so we create a new job
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
# create a new manager to run our analysis modules
manager.add_module(module)
await manager.run_once()
# check the results in the core
root = await manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
analysis = observable.get_analysis(module.type)
assert analysis
assert await analysis.get_details() == {"test": "test"}
assert analysis.observables[0] == ace.analysis.Observable("test", "hello")
@pytest.mark.asyncio
@pytest.mark.integration
async def test_force_stop_stuck_async_task(manager):
control = asyncio.Event()
class CustomAnalysisModule(AnalysisModule):
async def execute_analysis(self, root, observable, analysis):
nonlocal control
control.set()
# get stuck
import sys
await asyncio.sleep(sys.maxsize)
# register the type to the core
amt = AnalysisModuleType("test", "")
await manager.system.register_analysis_module_type(amt)
module = CustomAnalysisModule(amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
async def _cancel():
nonlocal control
nonlocal manager
await control.wait()
manager.force_stop()
cancel_task = asyncio.get_event_loop().create_task(_cancel())
await manager.run()
await cancel_task
class StuckAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
# get stuck
import time, sys
time.sleep(1000)
@pytest.mark.asyncio
@pytest.mark.integration
async def test_force_stop_stuck_sync_task(manager):
# there's nothing you can do when concurrency is threaded
if manager.concurrency_mode == CONCURRENCY_MODE_THREADED:
pytest.skip(f"cannot test in concurrency_mode {manager.concurrency_mode}")
# register the type to the core
amt = AnalysisModuleType("test", "")
await manager.system.register_analysis_module_type(amt)
module = StuckAnalysisModule(amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
async def _cancel():
nonlocal manager
manager.force_stop()
manager_task = asyncio.get_event_loop().create_task(manager.run())
await asyncio.wait([manager_task], timeout=0.01)
cancel_task = asyncio.get_event_loop().create_task(_cancel())
await manager_task
await cancel_task
@pytest.mark.asyncio
@pytest.mark.integration
async def test_raised_exception_during_async_analysis(manager):
class CustomAnalysisModule(AnalysisModule):
async def execute_analysis(self, root, observable, analysis):
raise RuntimeError("failure")
amt = AnalysisModuleType("test", "")
await manager.system.register_analysis_module_type(amt)
module = CustomAnalysisModule(amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
await manager.run_once()
root = await manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
analysis = observable.get_analysis(amt)
assert analysis.error_message == "testv1.0.0 failed analyzing type test value test: failure"
assert analysis.stack_trace
class FailingAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
raise RuntimeError("failure")
@pytest.mark.asyncio
@pytest.mark.integration
async def test_raised_exception_during_sync_analysis(manager):
amt = AnalysisModuleType("test", "")
await manager.system.register_analysis_module_type(amt)
module = FailingAnalysisModule(amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
await manager.run_once()
root = await manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
analysis = observable.get_analysis(amt)
assert analysis.error_message == "testv1.0.0 failed analyzing type test value test: failure"
assert analysis.stack_trace
class CrashingAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
import os, signal
if observable.value == "crash":
os.kill(os.getpid(), signal.SIGKILL)
else:
analysis.set_details({"test": "test"})
class SimpleSyncAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
analysis.set_details({"test": "test"})
@pytest.mark.asyncio
@pytest.mark.integration
async def test_crashing_sync_analysis_module(manager):
if manager.concurrency_mode == CONCURRENCY_MODE_THREADED:
pytest.skip(f"cannot test in concurrency_mode {manager.concurrency_mode}")
sync = asyncio.Event()
class CustomEventHandler(EventHandler):
async def handle_event(self, event: Event):
sync.set()
async def handle_exception(self, event: str, exception: Exception):
pass
# TODO when events are distributed modify this to use that
await app.state.system.register_event_handler(EVENT_ANALYSIS_ROOT_COMPLETED, CustomEventHandler())
amt_crashing = AnalysisModuleType("crash_test", "")
amt_ok = AnalysisModuleType("ok", "")
await manager.system.register_analysis_module_type(amt_crashing)
await manager.system.register_analysis_module_type(amt_ok)
# this is only supported in CONCURRENCY_MODE_PROCESS
crashing_module = CrashingAnalysisModule(amt_crashing)
ok_module = SimpleSyncAnalysisModule(amt_ok)
manager.add_module(crashing_module)
manager.add_module(ok_module)
root = manager.system.new_root()
observable = root.add_observable("test", "crash")
await root.submit()
await manager.run_once()
# wait for analysis to complete
assert await sync.wait()
root = await manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
analysis = observable.get_analysis(amt_crashing)
assert analysis.error_message == "crash_testv1.0.0 process crashed when analyzing type test value crash"
assert analysis.stack_trace
observable = root.get_observable(observable)
analysis = observable.get_analysis(amt_ok)
#
# the behavior of what happens to the other analysis modules that happen to
# be running in the same manager seems to be undefined, so there's really
# no way to test for that
#
# assert (
# analysis.error_message == "okv1.0.0 process crashed when analyzing type test value crash"
# and analysis.stack_trace
# ) or await analysis.get_details() == {"test": "test"}
@pytest.mark.asyncio
@pytest.mark.integration
async def test_upgraded_version_analysis_module(manager):
# cannot test this in process concurrency mode because it requires shared events
if manager.concurrency_mode == CONCURRENCY_MODE_PROCESS:
pytest.skip(f"cannot test in concurrency_mode {manager.concurrency_mode}")
# NOTE for this one we don't need to test both sync and async because
# this check comes before analysis module execution (same for both)
step_1 = asyncio.Event()
class CustomAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
nonlocal step_1
analysis.set_details({"version": self.type.version})
if not step_1.is_set():
step_1.set()
return
amt = AnalysisModuleType("test", "", version="1.0.0")
await manager.system.register_analysis_module_type(amt)
module = CustomAnalysisModule(type=amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
root_2 = manager.system.new_root()
observable_2 = root_2.add_observable("test", "test")
async def _upgrade():
nonlocal step_1
nonlocal root_2
await step_1.wait()
updated_amt = AnalysisModuleType("test", "", version="1.0.1")
await manager.system.register_analysis_module_type(updated_amt)
await root_2.submit()
upgrade_task = asyncio.create_task(_upgrade())
await manager.run()
await upgrade_task
    # in this case the version mismatch just causes the manager to exit
root = await manager.system.get_root_analysis(root_2)
observable = root.get_observable(observable)
# so no analysis should be seen
assert observable.get_analysis(amt) is None
@pytest.mark.asyncio
@pytest.mark.integration
async def test_upgraded_extended_version_async_analysis_module(manager):
"""Tests the ability of an analysis module to update extended version data."""
#
# in this case the first call to get_next_analysis_request fails
# but the module.upgrade() is called
# since the work task is not acquired it stays in the queue
# until the event_loop comes back around with the correct extended version data
#
step_1 = asyncio.Event()
step_2 = asyncio.Event()
class CustomAnalysisModule(AnalysisModule):
async def execute_analysis(self, root, observable, analysis):
nonlocal step_1
analysis.set_details({"extended_version": self.type.extended_version})
if not step_1.is_set():
step_1.set()
return
step_2.set()
async def upgrade(self):
self.type.extended_version = {"intel": "v2"}
amt = AnalysisModuleType("test", "", extended_version={"intel": "v1"})
await manager.system.register_analysis_module_type(amt)
module = CustomAnalysisModule(type=amt)
manager.add_module(module)
root = manager.system.new_root()
observable = root.add_observable("test", "test")
await root.submit()
root_2 = manager.system.new_root()
observable_2 = root_2.add_observable("test", "test")
async def _update_intel():
nonlocal step_1
nonlocal root_2
await step_1.wait()
# update the extended version data for this module type
updated_amt = AnalysisModuleType("test", "", extended_version={"intel": "v2"})
await manager.system.register_analysis_module_type(updated_amt)
await root_2.submit()
async def _shutdown():
nonlocal step_2
nonlocal manager
await step_2.wait()
manager.stop()
upgrade_task = asyncio.create_task(_update_intel())
shutdown_task = asyncio.create_task(_shutdown())
await manager.run()
await upgrade_task
await shutdown_task
root = await manager.system.get_root_analysis(root_2)
observable = root.get_observable(observable)
assert (await observable.get_analysis(amt).get_details())["extended_version"] == {"intel": "v2"}
class UpgradableAnalysisModule(MultiProcessAnalysisModule):
async def execute_analysis(self, root, observable, analysis):
analysis.set_details({"extended_version": self.type.extended_version})
async def upgrade(self):
self.type.extended_version = {"intel": "v2"}
@pytest.mark.parametrize("concurrency_mode", [CONCURRENCY_MODE_THREADED, CONCURRENCY_MODE_PROCESS])
@pytest.mark.asyncio
@pytest.mark.integration
async def test_upgraded_extended_version_sync_analysis_module(concurrency_mode, redis_url, manager):
"""Tests the ability of a sync analysis module to update extended version data."""
# we want to bail after the first execution of the module
class CustomAnalysisModuleManager(AnalysisModuleManager):
async def execute_module(self, *args, **kwargs):
try:
result = await AnalysisModuleManager.execute_module(self, *args, **kwargs)
finally:
self.shutdown = True
return result
custom_manager = CustomAnalysisModuleManager(
manager.system, RemoteACETestSystem, (manager.system.api.api_key,), concurrency_mode=concurrency_mode
)
root_analysis_completed = asyncio.Event()
class CustomEventHandler(EventHandler):
async def handle_event(self, event: Event):
root_analysis_completed.set()
async def handle_exception(self, event: str, exception: Exception):
pass
# TODO when events are distributed modify this to use that
await app.state.system.register_event_handler(EVENT_ANALYSIS_ROOT_COMPLETED, CustomEventHandler())
amt = AnalysisModuleType("test", "", extended_version={"intel": "v1"})
await custom_manager.system.register_analysis_module_type(amt)
module = UpgradableAnalysisModule(type=amt)
custom_manager.add_module(module)
root = custom_manager.system.new_root()
observable = root.add_observable("test", "test")
async def _update_intel():
nonlocal custom_manager
# wait for the event loop to start
await custom_manager.event_loop_starting_event.wait()
# update the extended version data for this module type
updated_amt = AnalysisModuleType("test", "", extended_version={"intel": "v2"})
await custom_manager.system.register_analysis_module_type(updated_amt)
# and then submit for analysis
await root.submit()
upgrade_task = asyncio.create_task(_update_intel())
await custom_manager.run()
await upgrade_task
await root_analysis_completed.wait()
root = await custom_manager.system.get_root_analysis(root)
observable = root.get_observable(observable)
assert (await observable.get_analysis(amt).get_details())["extended_version"] == {"intel": "v2"}
@pytest.mark.asyncio
@pytest.mark.integration
async def test_upgrade_analysis_module_failure(manager):
amt = AnalysisModuleType("test", "", extended_version={"intel": "v1"})
await manager.system.register_analysis_module_type(amt)
class CustomAnalysisModule(MultiProcessAnalysisModule):
async
#
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import gc
import pytest
import cupy as cp
import cudf
import numpy as np
import operator
from copy import deepcopy
from numba import cuda
from cudf.core.buffer import Buffer
from cuml.common.array import CumlArray
from cuml.common.memory_utils import _get_size_from_shape
from cuml.common.memory_utils import _strides_to_order
from rmm import DeviceBuffer
if sys.version_info < (3, 8):
try:
import pickle5 as pickle
except ImportError:
import pickle
else:
import pickle
test_input_types = [
'numpy', 'numba', 'cupy', 'series', None
]
test_output_types = {
'numpy': np.ndarray,
'cupy': cp.ndarray,
'numba': None,
'series': cudf.Series,
'dataframe': cudf.DataFrame,
'cudf': None
}
test_dtypes_all = [
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64
]
test_dtypes_output = [
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64
]
test_shapes = [10, (10,), (10, 1), (10, 5), (1, 10)]
test_slices = [0, 5, 'left', 'right', 'both', 'bool_op']
unsupported_cudf_dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
np.float16]
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_array_init(input_type, dtype, shape, order):
if input_type == 'series':
if dtype in unsupported_cudf_dtypes or \
shape in [(10, 5), (1, 10)]:
pytest.skip("Unsupported cuDF Series parameter")
inp, ary, ptr = create_ary_init_tests(input_type, dtype, shape, order)
if shape == (10, 5):
assert ary.order == order
if shape == 10:
assert ary.shape == (10,)
assert len(ary) == 10
elif input_type == 'series':
        # cuDF Series squeezes a (10, 1) input down to shape (10,)
if shape == (10, 1):
assert ary.shape == (10,)
else:
assert ary.shape == shape
assert ary.dtype == np.dtype(dtype)
if (input_type == "numpy"):
assert isinstance(ary._owner, cp.ndarray)
truth = cp.asnumpy(inp)
del inp
assert ary.ptr == ptr
data = ary.to_output('numpy')
assert np.array_equal(truth, data)
else:
helper_test_ownership(ary, inp, False)
@pytest.mark.parametrize('input_type', test_input_types)
def test_ownership_with_gc(input_type):
    # garbage collection slows down the test suite significantly, so we only
    # need to test each input type, not every shape/dtype/etc.
if input_type == 'numpy':
pytest.skip("test not valid for numpy input")
inp, ary, ptr = create_ary_init_tests(input_type, np.float32, (10, 10),
'F')
helper_test_ownership(ary, inp, True)
def create_ary_init_tests(ary_type, dtype, shape, order):
if ary_type is not None:
inp = create_input(ary_type, dtype, shape, order)
ary = CumlArray(data=inp)
ptr = ary.ptr
else:
inp = create_input('cupy', dtype, shape, order)
ptr = inp.__cuda_array_interface__['data'][0]
ary = CumlArray(data=ptr, owner=inp, dtype=inp.dtype, shape=inp.shape,
order=order)
return (inp, ary, ptr)
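# Minimal sketch (not part of the test suite) of the two construction paths the
# helper above exercises; the shape, dtype and order below are illustrative only:
#
#   arr = cp.zeros((10, 5), dtype=cp.float32, order='F')
#   wrapped = CumlArray(data=arr)                        # wrap an array-like directly
#   raw = CumlArray(data=arr.__cuda_array_interface__['data'][0],
#                   owner=arr, dtype=arr.dtype, shape=arr.shape,
#                   order='F')                           # wrap a raw device pointer, keeping arr alive as the owner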
def get_owner(curr):
if (isinstance(curr, CumlArray)):
return curr._owner
elif (isinstance(curr, cp.ndarray)):
return curr.data.mem._owner
else:
return None
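# For reference, the ownership chain that get_owner() walks looks roughly like this
# for a cupy-backed CumlArray (illustrative, not exhaustive):
#   CumlArray._owner -> cp.ndarray -> cp.ndarray.data.mem._owner -> original input object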
def helper_test_ownership(ary, inp, garbage_collect):
found_owner = False
# Make sure the input array is in the ownership chain
curr_owner = ary
while (curr_owner is not None):
if (curr_owner is inp):
found_owner = True
break
curr_owner = get_owner(curr_owner)
assert found_owner, "GPU input arrays must be in the owner chain"
inp_copy = deepcopy(cp.asarray(inp))
# testing owner reference keeps data of ary alive
del inp
if garbage_collect:
# Force GC just in case it lingers
gc.collect()
assert cp.all(cp.asarray(ary._owner) == cp.asarray(inp_copy))
@pytest.mark.parametrize('data_type', [bytes, bytearray, memoryview])
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_array_init_from_bytes(data_type, dtype, shape, order):
dtype = np.dtype(dtype)
bts = bytes(_get_size_from_shape(shape, dtype)[0])
if data_type != bytes:
bts = data_type(bts)
ary = CumlArray(bts, dtype=dtype, shape=shape, order=order)
if shape == (10, 5):
assert ary.order == order
if shape == 10:
assert ary.shape == (10,)
else:
assert ary.shape == shape
assert ary.dtype == dtype
cp_ary = cp.zeros(shape, dtype=dtype)
    assert cp.all(cp.asarray(ary) == cp_ary)
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_array_init_bad(input_type, dtype, shape, order):
"""
This test ensures that we assert on incorrect combinations of arguments
when creating CumlArray
"""
if input_type == 'series':
if dtype == np.float16:
pytest.skip("Skipping due to cuDF issue #9065")
inp = create_input(input_type, dtype, shape, 'C')
else:
inp = create_input(input_type, dtype, shape, order)
# Ensure the array is creatable
cuml_ary = CumlArray(inp)
with pytest.raises(AssertionError):
CumlArray(inp, dtype=cuml_ary.dtype)
with pytest.raises(AssertionError):
CumlArray(inp, shape=cuml_ary.shape)
with pytest.raises(AssertionError):
CumlArray(inp,
order=_strides_to_order(cuml_ary.strides, cuml_ary.dtype))
assert cp.all(cp.asarray(inp) == cp.asarray(cuml_ary))
@pytest.mark.parametrize('slice', test_slices)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_get_set_item(slice, order):
if order == 'F' and slice != 'both':
pytest.skip("See issue https://github.com/rapidsai/cuml/issues/2412")
inp = create_input('numpy', 'float32', (10, 10), order)
ary = CumlArray(data=inp)
if isinstance(slice, int):
assert np.array_equal(inp[slice], ary[slice].to_output('numpy'))
inp[slice] = 1.0
ary[slice] = 1.0
elif slice == 'left':
assert np.array_equal(inp[5:], ary[5:].to_output('numpy'))
inp[5:] = 1.0
ary[5:] = 1.0
elif slice == 'right':
assert np.array_equal(inp[:5], ary[:5].to_output('numpy'))
inp[:5] = 1.0
ary[:5] = 1.0
elif slice == 'both':
assert np.array_equal(inp[:], ary[:].to_output('numpy'))
inp[:] = 1.0
ary[:] = 1.0
else:
pytest.skip("not implemented logical indexing, unless we need it")
assert np.array_equal(inp, ary.to_output('numpy'))
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['C', 'F'])
def test_create_empty(shape, dtype, order):
ary = CumlArray.empty(shape=shape, dtype=dtype, order=order)
assert isinstance(ary.ptr, int)
if shape == 10:
assert ary.shape == (shape,)
else:
assert ary.shape == shape
assert ary.dtype == np.dtype(dtype)
assert isinstance(ary._owner.data.mem._owner, DeviceBuffer)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_zeros(shape, dtype, order):
ary = CumlArray.zeros(shape=shape, dtype=dtype, order=order)
test = cp.zeros(shape).astype(dtype)
assert cp.all(test == cp.asarray(ary))
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_ones(shape, dtype, order):
ary = CumlArray.ones(shape=shape, dtype=dtype, order=order)
test = cp.ones(shape).astype(dtype)
assert cp.all(test == cp.asarray(ary))
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_create_full(shape, dtype, order):
value = cp.array([cp.random.randint(100)]).astype(dtype)
ary = CumlArray.full(value=value[0], shape=shape, dtype=dtype, order=order)
test = cp.zeros(shape).astype(dtype) + value[0]
assert cp.all(test == cp.asarray(ary))
@pytest.mark.parametrize('output_type', test_output_types)
@pytest.mark.parametrize('dtype', test_dtypes_output)
@pytest.mark.parametrize('out_dtype', test_dtypes_output)
@pytest.mark.parametrize('order', ['F', 'C'])
@pytest.mark.parametrize('shape', test_shapes)
def test_output(output_type, dtype, out_dtype, order, shape):
inp = create_input('numpy', dtype, shape, order)
ary = CumlArray(inp)
if dtype in unsupported_cudf_dtypes and \
output_type in ['series', 'dataframe', 'cudf']:
with pytest.raises(ValueError):
res = ary.to_output(output_type)
elif shape in [(10, 5), (1, 10)] and output_type == 'series':
with pytest.raises(ValueError):
res = ary.to_output(output_type)
else:
res = ary.to_output(output_type)
# using correct numba ndarray check
if output_type == 'numba':
assert cuda.devicearray.is_cuda_ndarray(res)
elif output_type == 'cudf':
if shape in [(10, 5), (1, 10)]:
assert isinstance(res, cudf.DataFrame)
else:
assert isinstance(res, cudf.Series)
else:
assert isinstance(res, test_output_types[output_type])
if output_type == 'numpy':
assert np.all(inp == ary.to_output('numpy'))
elif output_type == 'cupy':
assert cp.all(cp.asarray(inp) == ary.to_output('cupy'))
elif output_type == 'numba':
assert cp.all(cp.asarray(cuda.to_device(inp)) == cp.asarray(res))
elif output_type == 'series':
comp = cudf.Series(np.ravel(inp)) == res
assert np.all(comp.to_array())
elif output_type == 'dataframe':
if len(inp.shape) == 1:
inp = inp.reshape(inp.shape[0], 1)
comp = cudf.DataFrame(inp)
comp = comp == res
assert np.all(comp.as_gpu_matrix().copy_to_host())
# check for e2e cartesian product:
if output_type not in ['dataframe', 'cudf']:
res2 = CumlArray(res)
res2 = res2.to_output('numpy')
if output_type == 'series' and shape == (10, 1):
assert np.all(inp.reshape((1, 10)) == res2)
else:
assert np.all(inp == res2)
@pytest.mark.parametrize('output_type', test_output_types)
@pytest.mark.parametrize('dtype', [
np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
])
@pytest.mark.parametrize('out_dtype', [
np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64,
])
@pytest.mark.parametrize('shape', test_shapes)
def test_output_dtype(output_type, dtype, out_dtype, shape):
inp = create_input('numpy', dtype, shape, order="F")
ary = CumlArray(inp)
if dtype in unsupported_cudf_dtypes and \
output_type in ['series', 'dataframe', 'cudf']:
with pytest.raises(ValueError):
res = ary.to_output(
output_type=output_type,
output_dtype=out_dtype
)
elif shape in [(10, 5), (1, 10)] and output_type == 'series':
with pytest.raises(ValueError):
res = ary.to_output(
output_type=output_type,
output_dtype=out_dtype
)
else:
res = ary.to_output(output_type=output_type, output_dtype=out_dtype)
        if isinstance(res, cudf.DataFrame):
            assert res.values.dtype == out_dtype
        else:
            assert res.dtype == out_dtype
@pytest.mark.parametrize('dtype', test_dtypes_all)
@pytest.mark.parametrize('shape', test_shapes)
@pytest.mark.parametrize('order', ['F', 'C'])
def test_cuda_array_interface(dtype, shape, order):
inp = create_input('numba', dtype, shape, 'F')
ary = CumlArray(inp)
if isinstance(shape, tuple):
assert ary.__cuda_array_interface__['shape'] == shape
else:
assert ary.__cuda_array_interface__['shape'] == (shape,)
assert ary.__cuda_array_interface__['strides'] == inp.strides
assert ary.__cuda_array_interface__['typestr'] == inp.dtype.str
assert ary.__cuda_array_interface__['data'] == \
(inp.device_ctypes_pointer.value, False)
assert ary.__cuda_array_interface__['version'] == 2
    # since our test array is small, it's faster to compute the reference square
    # root on the host with numpy than to write a numba cuda kernel
truth = np.sqrt(inp.copy_to_host())
result = cp.sqrt(ary)
assert np.all(truth == cp.asnumpy(result))
return True
@pytest.mark.parametrize('input_type', test_input_types)
def test_serialize(input_type):
if input_type == 'series':
inp = create_input(input_type, np.float32, (10, 1), 'C')
else:
inp = create_input(input_type, np.float32, (10, 5), 'F')
ary = CumlArray(data=inp)
header, frames = ary.serialize()
ary2 = CumlArray.deserialize(header, frames)
assert pickle.loads(header['type-serialized']) is CumlArray
assert all(isinstance(f, Buffer) for f in frames)
if input_type == 'numpy':
assert np.all(inp == ary2.to_output('numpy'))
elif input_type == 'series':
assert np.all(inp == ary2.to_output('series'))
else:
assert cp.all(inp == cp.asarray(ary2))
assert ary.__cuda_array_interface__['shape'] == \
ary2.__cuda_array_interface__['shape']
assert ary.__cuda_array_interface__['strides'] == \
ary2.__cuda_array_interface__['strides']
assert ary.__cuda_array_interface__['typestr'] == \
ary2.__cuda_array_interface__['typestr']
if input_type != 'series':
# skipping one dimensional ary order test
assert ary.order == ary2.order
@pytest.mark.parametrize('input_type', test_input_types)
@pytest.mark.parametrize('protocol', [4, 5])
def test_pickle(input_type, protocol):
if protocol > pickle.HIGHEST_PROTOCOL:
pytest.skip(
f"Trying to test with pickle | |
# test/test_scrambling.py
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import unittest
import numpy as np
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
print(e)
from sionna.fec.scrambling import Descrambler, Scrambler
from sionna.utils import BinarySource
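# Rough usage sketch of the API exercised by the tests below; the argument values
# are illustrative assumptions, not recommendations:
#
#   scrambler = Scrambler(binary=True, keep_state=True, seed=1337)
#   x = scrambler(bits)               # scramble a tensor of bits
#   x = scrambler(x)                  # with keep_state=True, scrambling twice restores the input
#   descrambler = Descrambler(scrambler)
#   y = descrambler(scrambler(bits))  # y equals bits again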
class TestScrambler(unittest.TestCase):
def test_sequence_dimension(self):
"""Test against correct dimensions of the sequence"""
seq_lengths = [1, 100, 256, 1000, 1e4]
batch_sizes = [1, 100, 256, 1000, 1e4]
        # keep_state=True
for seq_length in seq_lengths:
# init new scrambler for new sequence size;
# only different batch_sizes are allowed in this mode
s = Scrambler(binary=False)
for batch_size in batch_sizes:
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
x = s(llr).numpy()
self.assertTrue(np.array_equal(np.array(x.shape),
[int(batch_size), int(seq_length)]))
        # keep_state=False
s = Scrambler(binary=False, keep_state=False)
for seq_length in seq_lengths:
for batch_size in batch_sizes:
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
x = s(llr).numpy()
self.assertTrue(np.array_equal(np.array(x.shape),
[int(batch_size), int(seq_length)]))
def test_sequence_offset(self):
"""Test that scrambling sequence has no offset, i.e., equal likely 0s
and 1s"""
seq_length = int(1e4)
batch_size = int(1e2)
for seed in (None, 1337, 1234, 1003): # test some initial seeds
for keep_state in (False, True):
s = Scrambler(seed=seed, keep_state=keep_state, binary=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# build scrambler
s(llr)
# generate a random sequence
x = s(tf.zeros_like(llr))
self.assertAlmostEqual(np.mean(x),
0.5,
places=2)
def test_sequence_batch(self):
"""Test that scrambling sequence is random per batch sample iff
keep_batch_dims=True."""
seq_length = int(1e6)
batch_size = int(1e1)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
for keep_state in (False, True):
s = Scrambler(keep_batch_constant=False,
keep_state=keep_state,
binary=True)
# generate a random sequence
x = s(tf.zeros_like(llr))
for i in range(batch_size-1):
for j in range(i+1,batch_size):
# each batch sample must be different
self.assertAlmostEqual(np.mean(np.abs(x[i,:]-x[j,:])),
0.5,
places=2)
        # test that the pattern is the same for the option keep_batch_constant=True
for keep_state in (False, True):
s = Scrambler(keep_batch_constant=True,
keep_state=keep_state,
binary=True)
# generate a random sequence
x = s(tf.zeros_like(llr))
for i in range(batch_size-1):
for j in range(i+1,batch_size):
# each batch sample is the same
self.assertTrue(np.sum(np.abs(x[i,:]-x[j,:]))==0)
def test_sequence_realization(self):
"""Test that scrambling sequences are random for each new realization.
"""
seq_length = int(1e5)
batch_size = int(1e2)
s = Scrambler(keep_state=False, binary=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
# generate a random sequence
x1 = s(tf.zeros_like(llr))
x2 = s(tf.zeros_like(llr))
self.assertAlmostEqual(np.mean(np.abs(x1-x2)), 0.5, places=3)
def test_inverse(self):
"""Test that scrambling can be inverted/removed.
2x scrambling must result in the original sequence (for binary and
LLRs).
"""
seq_length = int(1e5)
batch_size = int(1e2)
#check binary scrambling
b = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)],
minval=0,
maxval=1)
for keep_batch in (False, True):
s = Scrambler(binary=True,
keep_batch_constant=keep_batch,
keep_state=True)
# only works if keep_state=True
b = tf.cast(tf.greater(0.5, b), dtype=tf.float32)
x = s(b)
x = s(x)
self.assertIsNone(np.testing.assert_array_equal(x.numpy(),
b.numpy()))
#check soft-value scrambling (flip sign)
s = Scrambler(binary=False,
keep_batch_constant=keep_batch,
keep_state=True)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)])
x = s(llr)
x = s(x)
self.assertIsNone(np.testing.assert_array_equal(x.numpy(),
llr.numpy()))
def test_llr(self):
"""Test that scrambling works for soft-values (sign flip)."""
s = Scrambler(binary=False, seed=12345)
b = tf.ones([100,200])
x = s(b)
s2 = Scrambler(binary=True, seed=12345)
res = -2. * s2(tf.zeros_like(x)) + 1
self.assertIsNone(np.testing.assert_array_equal(x.numpy(), res.numpy()))
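        # Note: the relation checked above can be written as x = (1 - 2*c) * llr,
        # where c in {0, 1} is the binary scrambling sequence, i.e. scrambling a
        # soft value only flips its sign wherever c == 1.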
def test_keep_state(self):
"""Test that keep_state works as expected.
Iff keep_state==True, the scrambled sequences must be constant."""
seq_length = int(1e5)
batch_size = int(1e2)
llr = tf.random.uniform([tf.cast(batch_size, dtype=tf.int32),
tf.cast(seq_length, dtype=tf.int32)],
minval=-100,
maxval=100)
S = Scrambler(binary=True, keep_state=True)
res1 = S(tf.zeros_like(llr))
res2 = S(tf.zeros_like(llr))
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
# also check that the sequence is unique with keep_state=False
S = Scrambler(binary=True, keep_state=False)
_ = S(llr)
res1 = S(tf.zeros_like(llr))
_ = S(llr)
res2 = S(tf.zeros_like(llr))
self.assertFalse(np.array_equal(res1.numpy(), res2.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
source = BinarySource()
inputs = tf.keras.Input(shape=(k), dtype=tf.float32)
x = Scrambler()(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
# test that output batch dim is none
self.assertTrue(model.output_shape[0] is None)
# test that model can be called
b = source([bs,k])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,k])
model(b2)
model.summary()
def test_tf_fun(self):
"""Test that graph mode and XLA works as expected"""
@tf.function()
def run_graph(llr):
return s(llr)
@tf.function(jit_compile=True)
def run_graph_xla(llr):
return s(llr)
for keep_state in (False, True):
s = Scrambler(keep_state=keep_state)
b = tf.ones([100,200])
x1 = run_graph(b)
x2 = run_graph_xla(b)
# again with different batch_size
b = tf.ones([101,200])
x1 = run_graph(b)
x2 = run_graph_xla(b)
# and different sequence length
b = tf.ones([101,201])
x1 = run_graph(b)
x2 = run_graph_xla(b)
self.assertTrue(np.any(np.not_equal(x1.numpy(),b.numpy())))
self.assertTrue(np.any(np.not_equal(x2.numpy(),b.numpy())))
def test_seed(self):
"""Test that seed generates reproducible results."""
seq_length = int(1e5)
batch_size = int(1e2)
b = tf.zeros([batch_size, seq_length])
s1 = Scrambler(seed=1337, binary=True, keep_state=False)
res_s1_1 = s1(b)
res_s1_2 = s1(b)
# new realization per call
self.assertFalse(np.array_equal(res_s1_1.numpy(), res_s1_2.numpy()))
# if keep_state=True, the same seed should lead to the same sequence
s2 = Scrambler(seed=1337, binary=True, keep_state=True)
res_s2_1 = s2(b)
s3 = Scrambler(seed=1337)
res_s3_1 = s3(b)
        # the same seed leads to the same sequence
self.assertTrue(np.array_equal(res_s2_1.numpy(), res_s3_1.numpy()))
# but with random seed it gives a new sequence for each init
s4 = Scrambler(seed=None, binary=True, keep_state=True)
        res_s4_1 = s4(b)
s5 = Scrambler(seed=None)
res_s5_1 = s5(b)
        # independent random seeds lead to different sequences
self.assertFalse(np.array_equal(res_s4_1.numpy(), res_s5_1.numpy()))
        # for keep_state=False, even the same seed leads to new results
s6 = Scrambler(seed=1337, binary=True, keep_state=False)
res_s6_1 = s6(b)
# different seed generates new sequence
self.assertFalse(np.array_equal(res_s6_1.numpy(), res_s2_1.numpy()))
# init with same seed as previous random seed
s7 = Scrambler(seed=None, binary=True, keep_state=True)
res_s7_1 = s7(b)
s8 = Scrambler(seed=s7.seed, binary=True, keep_state=True)
res_s8_1 = s8(b)
        # the same seed leads to the same sequence
self.assertTrue(np.array_equal(res_s7_1.numpy(), res_s8_1.numpy()))
# test that seed can be also provided to call
seed = 987654
s9 = Scrambler(seed=45234, keep_state=False)
s10 = Scrambler(seed=76543, keep_state=True)
x1 = s9([b, seed]).numpy()
x2 = s9([b, seed+1]).numpy()
x3 = s9([b, seed]).numpy()
x4 = s10([b, seed]).numpy()
self.assertFalse(np.array_equal(x1, x2)) # different seed
self.assertTrue(np.array_equal(x1, x3)) # same seed
self.assertTrue(np.array_equal(x1, x4)) # same seed (keep_state=f)
# test that random seed allows inverse
x5 = s9([b, seed])
x6 = s9([b, seed]).numpy()
# same seed
self.assertTrue(np.array_equal(x5, x6)) # identity
# different seed
x7 = s9([b, seed+1])
self.assertFalse(np.array_equal(x5, x7)) # identity
# same seed again
x8 = s9([b, seed+1])
self.assertTrue(np.array_equal(x7, x8)) # identity
def test_dtype(self):
"""Test that variable dtypes are supported."""
seq_length = int(1e1)
batch_size = int(1e2)
dt_supported = [tf.float16, tf.float32, tf.float64]
for dt in dt_supported:
for dt_in in dt_supported:
for dt_out in dt_supported:
b = tf.zeros([batch_size, seq_length], dtype=dt_in)
s1 = Scrambler(dtype=dt)
s2 = Descrambler(s1, dtype=dt_out)
x = s1(b)
y = s2(x)
assert (x.dtype==dt)
assert (y.dtype==dt_out)
def test_descrambler(self):
""""Test that descrambler works as expected."""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.zeros([batch_size, seq_length])
s1 = Scrambler()
s2 = Descrambler(s1)
x = s1(b)
y = s2(x)
assert (np.array_equal(b.numpy(), y.numpy()))
x = s1([b, 1234])
y = s2(x)
assert (not np.array_equal(b.numpy(), y.numpy()))
# check if seed is correctly retrieved from scrambler
s3 = Scrambler(seed=12345)
s4 = Descrambler(s3)
x = s3(b)
y = s4(x)
assert (np.array_equal(b.numpy(), y.numpy()))
def test_descrambler_nonbin(self):
""""Test that descrambler works with non-binary."""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.zeros([batch_size, seq_length])
# scrambler binary, but descrambler non-binary
scrambler = Scrambler(seed=1235456, binary=True)
descrambler = Descrambler(scrambler, binary=False)
# with explicit seed
s = 8764
y = scrambler([b, s])
        z = descrambler([2*y-1, s]) # bpsk
z = 1 + z # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
#without explicit seed
y = scrambler(b)
        z = descrambler(2*y-1) # bpsk
z = 1 + z # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
        # scrambler non-binary, but descrambler binary
scrambler = Scrambler(seed=1235456, binary=False)
descrambler = Descrambler(scrambler, binary=True)
s = 546342
        y = scrambler([2*b-1, s]) # bpsk
y = 0.5*(1 + y) # remove bpsk
z = descrambler([y, s])
assert (np.array_equal(b.numpy(), z.numpy()))
#without explicit seed
        y = scrambler(2*b-1) # bpsk
y = 0.5*(1 + y) # remove bpsk
z = descrambler(y)
y = 1 + y # remove bpsk
assert (np.array_equal(b.numpy(), z.numpy()))
def test_scrambler_binary(self):
"""test that binary flag can be used as input"""
seq_length = int(1e2)
batch_size = int(1e1)
b = tf.ones([batch_size, seq_length])
# scrambler binary, but descrambler non-binary
scrambler = Scrambler(seed=1245, binary=True)
s = 1234
__init__(self):
super(Pce.TopologySummary.StatsTopologyUpdate, self).__init__()
self.yang_name = "stats-topology-update"
self.yang_parent_name = "topology-summary"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('num_nodes_added', YLeaf(YType.uint32, 'num-nodes-added')),
('num_nodes_deleted', YLeaf(YType.uint32, 'num-nodes-deleted')),
('num_links_added', YLeaf(YType.uint32, 'num-links-added')),
('num_links_deleted', YLeaf(YType.uint32, 'num-links-deleted')),
('num_prefixes_added', YLeaf(YType.uint32, 'num-prefixes-added')),
('num_prefixes_deleted', YLeaf(YType.uint32, 'num-prefixes-deleted')),
])
self.num_nodes_added = None
self.num_nodes_deleted = None
self.num_links_added = None
self.num_links_deleted = None
self.num_prefixes_added = None
self.num_prefixes_deleted = None
self._segment_path = lambda: "stats-topology-update"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/topology-summary/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Pce.TopologySummary.StatsTopologyUpdate, ['num_nodes_added', 'num_nodes_deleted', 'num_links_added', 'num_links_deleted', 'num_prefixes_added', 'num_prefixes_deleted'], name, value)
class TunnelInfos(Entity):
"""
Tunnel database in XTC
.. attribute:: tunnel_info
Tunnel information
**type**\: list of :py:class:`TunnelInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TunnelInfos.TunnelInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.TunnelInfos, self).__init__()
self.yang_name = "tunnel-infos"
self.yang_parent_name = "pce"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("tunnel-info", ("tunnel_info", Pce.TunnelInfos.TunnelInfo))])
self._leafs = OrderedDict()
self.tunnel_info = YList(self)
self._segment_path = lambda: "tunnel-infos"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Pce.TunnelInfos, [], name, value)
class TunnelInfo(Entity):
"""
Tunnel information
.. attribute:: peer_address (key)
Peer Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: plsp_id (key)
PCEP LSP ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: tunnel_name (key)
Tunnel name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: pcc_address
PCC address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: tunnel_name_xr
Tunnel Name
**type**\: str
.. attribute:: brief_lsp_information
Brief LSP information
**type**\: list of :py:class:`BriefLspInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TunnelInfos.TunnelInfo.BriefLspInformation>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.TunnelInfos.TunnelInfo, self).__init__()
self.yang_name = "tunnel-info"
self.yang_parent_name = "tunnel-infos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['peer_address','plsp_id','tunnel_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("brief-lsp-information", ("brief_lsp_information", Pce.TunnelInfos.TunnelInfo.BriefLspInformation))])
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
('plsp_id', YLeaf(YType.int32, 'plsp-id')),
('tunnel_name', YLeaf(YType.str, 'tunnel-name')),
('pcc_address', YLeaf(YType.str, 'pcc-address')),
('tunnel_name_xr', YLeaf(YType.str, 'tunnel-name-xr')),
])
self.peer_address = None
self.plsp_id = None
self.tunnel_name = None
self.pcc_address = None
self.tunnel_name_xr = None
self.brief_lsp_information = YList(self)
self._segment_path = lambda: "tunnel-info" + "[peer-address='" + str(self.peer_address) + "']" + "[plsp-id='" + str(self.plsp_id) + "']" + "[tunnel-name='" + str(self.tunnel_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/tunnel-infos/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Pce.TunnelInfos.TunnelInfo, ['peer_address', 'plsp_id', 'tunnel_name', 'pcc_address', 'tunnel_name_xr'], name, value)
class BriefLspInformation(Entity):
"""
Brief LSP information
.. attribute:: source_address
Source address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_address
Destination address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: tunnel_id
Tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: lspid
LSP ID
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_sid
Binding SID
**type**\: int
**range:** 0..4294967295
.. attribute:: lsp_setup_type
LSP Setup Type
**type**\: :py:class:`LspSetup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.LspSetup>`
.. attribute:: operational_state
Operational state
**type**\: :py:class:`PcepLspState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcepLspState>`
.. attribute:: administrative_state
Admin state
**type**\: :py:class:`LspState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.LspState>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.TunnelInfos.TunnelInfo.BriefLspInformation, self).__init__()
self.yang_name = "brief-lsp-information"
self.yang_parent_name = "tunnel-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('source_address', YLeaf(YType.str, 'source-address')),
('destination_address', YLeaf(YType.str, 'destination-address')),
('tunnel_id', YLeaf(YType.uint32, 'tunnel-id')),
('lspid', YLeaf(YType.uint32, 'lspid')),
('binding_sid', YLeaf(YType.uint32, 'binding-sid')),
('lsp_setup_type', YLeaf(YType.enumeration, 'lsp-setup-type')),
('operational_state', YLeaf(YType.enumeration, 'operational-state')),
('administrative_state', YLeaf(YType.enumeration, 'administrative-state')),
])
self.source_address = None
self.destination_address = None
self.tunnel_id = None
self.lspid = None
self.binding_sid = None
self.lsp_setup_type = None
self.operational_state = None
self.administrative_state = None
self._segment_path = lambda: "brief-lsp-information"
def __setattr__(self, name, value):
self._perform_setattr(Pce.TunnelInfos.TunnelInfo.BriefLspInformation, ['source_address', 'destination_address', 'tunnel_id', 'lspid', 'binding_sid', 'lsp_setup_type', 'operational_state', 'administrative_state'], name, value)
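    # Hypothetical read sketch (not taken from this generated module) showing how
    # the tunnel-infos subtree above is typically consumed with ydk-py:
    #
    #   from ydk.services import CRUDService
    #   from ydk.providers import NetconfServiceProvider
    #   provider = NetconfServiceProvider(address="10.0.0.1", username="admin", password="admin")
    #   pce = CRUDService().read(provider, Pce())
    #   for tunnel in pce.tunnel_infos.tunnel_info:
    #       print(tunnel.peer_address, tunnel.tunnel_name_xr)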
class PeerDetailInfos(Entity):
"""
Detailed peers database in XTC
.. attribute:: peer_detail_info
Detailed PCE peer information
**type**\: list of :py:class:`PeerDetailInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PeerDetailInfos.PeerDetailInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.PeerDetailInfos, self).__init__()
self.yang_name = "peer-detail-infos"
self.yang_parent_name = "pce"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("peer-detail-info", ("peer_detail_info", Pce.PeerDetailInfos.PeerDetailInfo))])
self._leafs = OrderedDict()
self.peer_detail_info = YList(self)
self._segment_path = lambda: "peer-detail-infos"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Pce.PeerDetailInfos, [], name, value)
class PeerDetailInfo(Entity):
"""
Detailed PCE peer information
.. attribute:: peer_address (key)
Peer Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: detail_pcep_information
Detailed PCE protocol information
**type**\: :py:class:`DetailPcepInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation>`
.. attribute:: peer_address_xr
Peer address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: peer_protocol
Protocol between PCE and peer
**type**\: :py:class:`PceProto <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceProto>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.PeerDetailInfos.PeerDetailInfo, self).__init__()
self.yang_name = "peer-detail-info"
self.yang_parent_name = "peer-detail-infos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['peer_address']
self._child_container_classes = OrderedDict([("detail-pcep-information", ("detail_pcep_information", Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
('peer_address_xr', YLeaf(YType.str, 'peer-address-xr')),
('peer_protocol', YLeaf(YType.enumeration, 'peer-protocol')),
])
self.peer_address = None
self.peer_address_xr = None
self.peer_protocol = None
self.detail_pcep_information = Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation()
self.detail_pcep_information.parent = self
self._children_name_map["detail_pcep_information"] = "detail-pcep-information"
self._children_yang_names.add("detail-pcep-information")
self._segment_path = lambda: "peer-detail-info" + "[peer-address='" + str(self.peer_address) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/peer-detail-infos/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Pce.PeerDetailInfos.PeerDetailInfo, ['peer_address', 'peer_address_xr', 'peer_protocol'], name, value)
class DetailPcepInformation(Entity):
"""
Detailed PCE protocol information
.. attribute:: brief_pcep_information
Brief PCE protocol information
**type**\: :py:class:`BriefPcepInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation>`
.. attribute:: last_error_rx
Last PCError received
**type**\: :py:class:`LastErrorRx <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx>`
.. attribute:: last_error_tx
Last PCError sent
**type**\: :py:class:`LastErrorTx <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx>`
.. attribute:: error
Error (for display only)
**type**\: str
.. attribute:: speaker_id
Speaker Entity ID
**type**\: str
.. attribute:: pcep_up_time
PCEP Up Time
**type**\: int
**range:** 0..4294967295
.. attribute:: keepalives
Keepalive count
**type**\: int
**range:** 0..4294967295
.. attribute:: md5_enabled
MD5 Authentication Enabled
**type**\: bool
.. attribute:: keychain_enabled
Keychain based Authentication Enabled
**type**\: bool
.. attribute:: negotiated_local_keepalive
Negotiated KA
**type**\: int
**range:** 0..4294967295
.. attribute:: negotiated_remote_keepalive
Negotiated KA
**type**\: int
**range:** 0..4294967295
.. attribute:: negotiated_dead_time
Negotiated DT
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_request_rx
PCEReq Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_request_tx
PCEReq Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_reply_rx
PCERep Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_reply_tx
PCERep Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_error_rx
PCEErr Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_error_tx
PCEErr Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_open_tx
PCEOpen Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_open_rx
PCEOpen Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_report_rx
PCERpt Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_report_tx
PCERpt Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_update_rx
PCEUpd Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_update_tx
PCEUpd Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_initiate_rx
PCEInit Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_initiate_tx
PCEInit Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_keepalive_tx
PCE Keepalive Tx
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: pce_keepalive_rx
PCE Keepalive Rx
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: local_session_id
Local PCEP session ID
**type**\: int
**range:** 0..255
.. attribute:: remote_session_id
Remote PCEP session ID
**type**\: int
**range:** 0..255
.. attribute:: minimum_keepalive_interval
Minimum keepalive interval for the peer
**type**\: int
**range:** 0..255
.. attribute:: maximum_dead_interval
Maximum dead interval for the peer
**type**\: int
**range:** 0..255
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation, self).__init__()
self.yang_name = "detail-pcep-information"
self.yang_parent_name = "peer-detail-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("brief-pcep-information", ("brief_pcep_information", Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation)), ("last-error-rx", ("last_error_rx", Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx)), ("last-error-tx", ("last_error_tx", Pce.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('error', YLeaf(YType.str, 'error')),
('speaker_id', YLeaf(YType.str, 'speaker-id')),
('pcep_up_time', YLeaf(YType.uint32, 'pcep-up-time')),
('keepalives', YLeaf(YType.uint32, 'keepalives')),
('md5_enabled', YLeaf(YType.boolean, 'md5-enabled')),
'1f236': {'canonical_name': 'X', 'aliases': ['u6709']},
# '1f21a': {'canonical_name': 'X', 'aliases': ['u7121']},
# '1f238': {'canonical_name': 'X', 'aliases': ['u7533']},
# '1f23a': {'canonical_name': 'X', 'aliases': ['u55b6']},
# '1f237': {'canonical_name': 'X', 'aliases': ['u6708']},
'2734': {'canonical_name': 'eight_pointed_star', 'aliases': []},
'1f19a': {'canonical_name': 'vs', 'aliases': []},
'1f4ae': {'canonical_name': 'white_flower', 'aliases': []},
# '1f250': {'canonical_name': 'X', 'aliases': ['ideograph_advantage']},
# japanese character
# '3299': {'canonical_name': 'X', 'aliases': ['secret']},
# '3297': {'canonical_name': 'X', 'aliases': ['congratulations']},
# '1f234': {'canonical_name': 'X', 'aliases': ['u5408']},
# '1f235': {'canonical_name': 'X', 'aliases': ['u6e80']},
# '1f239': {'canonical_name': 'X', 'aliases': ['u5272']},
# '1f232': {'canonical_name': 'X', 'aliases': ['u7981']},
'1f170': {'canonical_name': 'a', 'aliases': []},
'1f171': {'canonical_name': 'b', 'aliases': []},
'1f18e': {'canonical_name': 'ab', 'aliases': []},
'1f191': {'canonical_name': 'cl', 'aliases': []},
'1f17e': {'canonical_name': 'o', 'aliases': []},
'1f198': {'canonical_name': 'sos', 'aliases': []},
# Symbols/105 seems like a better x, and looks more like the other letters
'274c': {'canonical_name': 'cross_mark', 'aliases': ['incorrect', 'wrong']},
'2b55': {'canonical_name': 'circle', 'aliases': []},
'1f6d1': {'canonical_name': 'stop_sign', 'aliases': ['octagonal_sign']},
'26d4': {'canonical_name': 'no_entry', 'aliases': ['wrong_way']},
'1f4db': {'canonical_name': 'name_badge', 'aliases': []},
'1f6ab': {'canonical_name': 'prohibited', 'aliases': ['not_allowed']},
'1f4af': {'canonical_name': '100', 'aliases': ['hundred']},
'1f4a2': {'canonical_name': 'anger', 'aliases': ['bam', 'pow']},
'2668': {'canonical_name': 'hot_springs', 'aliases': []},
'1f6b7': {'canonical_name': 'no_pedestrians', 'aliases': []},
'1f6af': {'canonical_name': 'do_not_litter', 'aliases': []},
'1f6b3': {'canonical_name': 'no_bicycles', 'aliases': []},
'1f6b1': {'canonical_name': 'non-potable_water', 'aliases': []},
'1f51e': {'canonical_name': 'underage', 'aliases': ['nc17']},
'1f4f5': {'canonical_name': 'no_phones', 'aliases': []},
'1f6ad': {'canonical_name': 'no_smoking', 'aliases': []},
'2757': {'canonical_name': 'exclamation', 'aliases': []},
'2755': {'canonical_name': 'grey_exclamation', 'aliases': []},
'2753': {'canonical_name': 'question', 'aliases': []},
'2754': {'canonical_name': 'grey_question', 'aliases': []},
'203c': {'canonical_name': 'bangbang', 'aliases': ['double_exclamation']},
'2049': {'canonical_name': 'interrobang', 'aliases': []},
'1f505': {'canonical_name': 'low_brightness', 'aliases': ['dim']},
'1f506': {'canonical_name': 'brightness', 'aliases': ['high_brightness']},
'303d': {'canonical_name': 'part_alternation', 'aliases': []},
'26a0': {'canonical_name': 'warning', 'aliases': ['caution', 'danger']},
'1f6b8': {'canonical_name': 'children_crossing', 'aliases': ['school_crossing', 'drive_with_care']},
'1f531': {'canonical_name': 'trident', 'aliases': []},
'269c': {'canonical_name': 'fleur_de_lis', 'aliases': []},
'1f530': {'canonical_name': 'beginner', 'aliases': []},
'267b': {'canonical_name': 'recycle', 'aliases': []},
# seems like the best check
'2705': {'canonical_name': 'check', 'aliases': ['all_good', 'approved']},
# '1f22f': {'canonical_name': 'X', 'aliases': ['u6307']},
# stock_market seemed more useful
'1f4b9': {'canonical_name': 'stock_market', 'aliases': []},
'2747': {'canonical_name': 'sparkle', 'aliases': []},
'2733': {'canonical_name': 'eight_spoked_asterisk', 'aliases': []},
'274e': {'canonical_name': 'x', 'aliases': []},
'1f310': {'canonical_name': 'www', 'aliases': ['globe']},
'1f4a0': {'canonical_name': 'cute', 'aliases': ['kawaii', 'diamond_with_a_dot']},
'24c2': {'canonical_name': 'metro', 'aliases': ['m']},
'1f300': {'canonical_name': 'cyclone', 'aliases': ['hurricane', 'typhoon']},
'1f4a4': {'canonical_name': 'zzz', 'aliases': []},
'1f3e7': {'canonical_name': 'atm', 'aliases': []},
'1f6be': {'canonical_name': 'wc', 'aliases': ['water_closet']},
'267f': {'canonical_name': 'accessible', 'aliases': ['wheelchair', 'disabled']},
'1f17f': {'canonical_name': 'parking', 'aliases': ['p']},
# '1f233': {'canonical_name': 'X', 'aliases': ['u7a7a']},
# '1f202': {'canonical_name': 'X', 'aliases': ['sa']},
'1f6c2': {'canonical_name': 'passport_control', 'aliases': ['immigration']},
'1f6c3': {'canonical_name': 'customs', 'aliases': []},
'1f6c4': {'canonical_name': 'baggage_claim', 'aliases': []},
'1f6c5': {'canonical_name': 'locker', 'aliases': ['locked_bag']},
'1f6b9': {'canonical_name': 'mens', 'aliases': []},
'1f6ba': {'canonical_name': 'womens', 'aliases': []},
# seems more in line with the surrounding bathroom symbols
'1f6bc': {'canonical_name': 'baby_change_station', 'aliases': ['nursery']},
'1f6bb': {'canonical_name': 'restroom', 'aliases': []},
'1f6ae': {'canonical_name': 'put_litter_in_its_place', 'aliases': []},
'1f3a6': {'canonical_name': 'cinema', 'aliases': ['movie_theater']},
'1f4f6': {'canonical_name': 'cell_reception', 'aliases': ['signal_strength', 'signal_bars']},
# '1f201': {'canonical_name': 'X', 'aliases': ['koko']},
'1f523': {'canonical_name': 'symbols', 'aliases': []},
'2139': {'canonical_name': 'info', 'aliases': []},
'1f524': {'canonical_name': 'abc', 'aliases': []},
'1f521': {'canonical_name': 'abcd', 'aliases': ['alphabet']},
'1f520': {'canonical_name': 'capital_abcd', 'aliases': ['capital_letters']},
'1f196': {'canonical_name': 'ng', 'aliases': []},
# from unicode/gemoji. Saving ok for People/111
'1f197': {'canonical_name': 'squared_ok', 'aliases': []},
# from unicode, and to parallel Symbols/135. Saving up for Symbols/171
'1f199': {'canonical_name': 'squared_up', 'aliases': []},
'1f192': {'canonical_name': 'cool', 'aliases': []},
'1f195': {'canonical_name': 'new', 'aliases': []},
'1f193': {'canonical_name': 'free', 'aliases': []},
'0030-20e3': {'canonical_name': 'zero', 'aliases': []},
'0031-20e3': {'canonical_name': 'one', 'aliases': []},
'0032-20e3': {'canonical_name': 'two', 'aliases': []},
'0033-20e3': {'canonical_name': 'three', 'aliases': []},
'0034-20e3': {'canonical_name': 'four', 'aliases': []},
'0035-20e3': {'canonical_name': 'five', 'aliases': []},
'0036-20e3': {'canonical_name': 'six', 'aliases': []},
'0037-20e3': {'canonical_name': 'seven', 'aliases': []},
'0038-20e3': {'canonical_name': 'eight', 'aliases': []},
'0039-20e3': {'canonical_name': 'nine', 'aliases': []},
'1f51f': {'canonical_name': 'ten', 'aliases': []},
'1f522': {'canonical_name': '1234', 'aliases': ['numbers']},
'0023-20e3': {'canonical_name': 'hash', 'aliases': []},
'002a-20e3': {'canonical_name': 'asterisk', 'aliases': []},
'25b6': {'canonical_name': 'play', 'aliases': []},
'23f8': {'canonical_name': 'pause', 'aliases': []},
'23ef': {'canonical_name': 'play_pause', 'aliases': []},
# stop taken by People/118
'23f9': {'canonical_name': 'stop_button', 'aliases': []},
'23fa': {'canonical_name': 'record', 'aliases': []},
'23ed': {'canonical_name': 'next_track', 'aliases': ['skip_forward']},
'23ee': {'canonical_name': 'previous_track', 'aliases': ['skip_back']},
'23e9': {'canonical_name': 'fast_forward', 'aliases': []},
'23ea': {'canonical_name': 'rewind', 'aliases': ['fast_reverse']},
'23eb': {'canonical_name': 'double_up', 'aliases': ['fast_up']},
'23ec': {'canonical_name': 'double_down', 'aliases': ['fast_down']},
'25c0': {'canonical_name': 'play_reverse', 'aliases': []},
'1f53c': {'canonical_name': 'upvote', 'aliases': ['up_button', 'increase']},
'1f53d': {'canonical_name': 'downvote', 'aliases': ['down_button', 'decrease']},
'27a1': {'canonical_name': 'right', 'aliases': ['east']},
'2b05': {'canonical_name': 'left', 'aliases': ['west']},
'2b06': {'canonical_name': 'up', 'aliases': ['north']},
'2b07': {'canonical_name': 'down', 'aliases': ['south']},
'2197': {'canonical_name': 'upper_right', 'aliases': ['north_east']},
'2198': {'canonical_name': 'lower_right', 'aliases': ['south_east']},
'2199': {'canonical_name': 'lower_left', 'aliases': ['south_west']},
'2196': {'canonical_name': 'upper_left', 'aliases': ['north_west']},
'2195': {'canonical_name': 'up_down', 'aliases': []},
'2194': {'canonical_name': 'left_right', 'aliases': ['swap']},
'21aa': {'canonical_name': 'forward', 'aliases': ['right_hook']},
'21a9': {'canonical_name': 'reply', 'aliases': ['left_hook']},
'2934': {'canonical_name': 'heading_up', 'aliases': []},
'2935': {'canonical_name': 'heading_down', 'aliases': []},
'1f500': {'canonical_name': 'shuffle', 'aliases': []},
'1f501': {'canonical_name': 'repeat', 'aliases': []},
'1f502': {'canonical_name': 'repeat_one', 'aliases': []},
'1f504': {'canonical_name': 'counterclockwise', 'aliases': ['return']},
'1f503': {'canonical_name': 'clockwise', 'aliases': []},
'1f3b5': {'canonical_name': 'music', 'aliases': []},
'1f3b6': {'canonical_name': 'musical_notes', 'aliases': []},
'2795': {'canonical_name': 'plus', 'aliases': ['add']},
'2796': {'canonical_name': 'minus', 'aliases': ['subtract']},
'2797': {'canonical_name': 'division', 'aliases': ['divide']},
'2716': {'canonical_name': 'multiplication', 'aliases': ['multiply']},
'1f4b2': {'canonical_name': 'dollars', 'aliases': []},
# There is no other exchange, so might as well generalize this
'1f4b1': {'canonical_name': 'exchange', 'aliases': []},
'2122': {'canonical_name': 'tm', 'aliases': ['trademark']},
'3030': {'canonical_name': 'wavy_dash', 'aliases': []},
'27b0': {'canonical_name': 'loop', 'aliases': []},
# https://emojipedia.org/double-curly-loop/
'27bf': {'canonical_name': 'double_loop', 'aliases': ['voicemail']},
'1f51a': {'canonical_name': 'end', 'aliases': []},
'1f519': {'canonical_name': 'back', 'aliases': []},
'1f51b': {'canonical_name': 'on', 'aliases': []},
'1f51d': {'canonical_name': 'top', 'aliases': []},
'1f51c': {'canonical_name': 'soon', 'aliases': []},
'2714': {'canonical_name': 'check_mark', 'aliases': []},
'2611': {'canonical_name': 'checkbox', 'aliases': []},
'1f518': {'canonical_name': 'radio_button', 'aliases': []},
'26aa': {'canonical_name': 'white_circle', 'aliases': []},
'26ab': {'canonical_name': 'black_circle', 'aliases': []},
'1f534': {'canonical_name': 'red_circle', 'aliases': []},
'1f535': {'canonical_name': 'blue_circle', 'aliases': []},
'1f53a': {'canonical_name': 'red_triangle_up', 'aliases': []},
'1f53b': {'canonical_name': 'red_triangle_down', 'aliases': []},
'1f538': {'canonical_name': 'small_orange_diamond', 'aliases': []},
'1f539': {'canonical_name': 'small_blue_diamond', 'aliases': []},
'1f536': {'canonical_name': 'large_orange_diamond', 'aliases': []},
'1f537': {'canonical_name': 'large_blue_diamond', 'aliases': []},
'1f533': {'canonical_name': 'black_and_white_square', 'aliases': []},
'1f532': {'canonical_name': 'white_and_black_square', 'aliases': []},
'25aa': {'canonical_name': 'black_small_square', 'aliases': []},
'25ab': {'canonical_name': 'white_small_square', 'aliases': []},
'25fe': {'canonical_name': 'black_medium_small_square', 'aliases': []},
'25fd': {'canonical_name': 'white_medium_small_square', 'aliases': []},
'25fc': {'canonical_name': 'black_medium_square', 'aliases': []},
'25fb': {'canonical_name': 'white_medium_square', 'aliases': []},
'2b1b': {'canonical_name': 'black_large_square', 'aliases': []},
'2b1c': {'canonical_name': 'white_large_square', 'aliases': []},
'1f508': {'canonical_name': 'speaker', 'aliases': []},
'1f507': {'canonical_name': 'mute', 'aliases': ['no_sound']},
'1f509': {'canonical_name': 'softer', 'aliases': []},
'1f50a': {'canonical_name': 'louder', 'aliases': ['sound']},
'1f514': {'canonical_name': 'notifications', 'aliases': ['bell']},
'1f515': {'canonical_name': 'mute_notifications', 'aliases': []},
'1f4e3': {'canonical_name': 'megaphone', 'aliases': ['shout']},
'1f4e2': {'canonical_name': 'loudspeaker', 'aliases': ['bullhorn']},
'1f4ac': {'canonical_name': 'umm', 'aliases': ['speech_balloon']},
'1f5e8': {'canonical_name': 'speech_bubble', 'aliases': []},
'1f4ad': {'canonical_name': 'thought', 'aliases': ['dream']},
'1f5ef': {'canonical_name': 'anger_bubble', 'aliases': []},
'2660': {'canonical_name': 'spades', 'aliases': []},
'2663': {'canonical_name': 'clubs', 'aliases': []},
'2665': {'canonical_name': 'hearts', 'aliases': []},
'2666': {'canonical_name': 'diamonds', 'aliases': []},
'1f0cf': {'canonical_name': 'joker', 'aliases': []},
'1f3b4': {'canonical_name': 'playing_cards', 'aliases': []},
'1f004': {'canonical_name': 'mahjong', 'aliases': []},
# The only use I can think of for so many clocks is to be able to use them
# to vote on times and such in emoji reactions. But a) the experience is
# not that great (the images are too small), b) there are issues with
# 24-hour time (used in many countries), like what is 00:30 or 01:00
# called, c) it's hard to make the compose typeahead experience great, and
# d) we should have a dedicated time voting widget that takes care of
# timezone and locale issues, and uses a digital representation.
# '1f550': {'canonical_name': 'X', 'aliases': ['clock1']},
# '1f551': {'canonical_name': 'X', 'aliases': ['clock2']},
# '1f552': {'canonical_name': 'X', 'aliases': ['clock3']},
# '1f553': {'canonical_name': 'X', 'aliases': ['clock4']},
# '1f554': {'canonical_name': 'X', 'aliases': ['clock5']},
# '1f555': {'canonical_name': 'X', 'aliases': ['clock6']},
# '1f556': {'canonical_name': 'X', 'aliases': ['clock7']},
# seems like the best choice for time
'1f557': {'canonical_name': 'time', 'aliases': ['clock']},
# '1f558': {'canonical_name': 'X', 'aliases': ['clock9']},
# '1f559': {'canonical_name': 'X', 'aliases': ['clock10']},
# '1f55a': {'canonical_name': 'X', 'aliases': ['clock11']},
# '1f55b': {'canonical_name': 'X', 'aliases': ['clock12']},
# '1f55c': {'canonical_name': 'X', 'aliases': ['clock130']},
# '1f55d': {'canonical_name': 'X', 'aliases': ['clock230']},
import json
import numpy as np
from PIL import Image
from django.http import JsonResponse
from apps.face_element_swapping import get_faces_landmarks
from apps.face_element_swapping.change_faces import ChangeFaceElement
from ..db_func import DBFunc
from ..helpers import convert_img_to_base64, \
convert_base64_to_pil, \
convert_rgb_array_to_text, \
convert_text_to_rgb_array, \
remove_prefix_from_base64, \
set_mode_of_pil, \
correct_size, resize_img
from ..settings import MESSAGES_REGARDING_MORE_OR_LESS_THAN_ONE_FACE, \
MESSAGES_REGARDING_EXACTLY_ONE_FACE, LANDMARKS_FUNCTIONS, MINIMUM_VALUE_OF_THE_ALPHA_CHANNEL, \
DEFAULT_PIL_MODE, PIL_MODE_OF_TRANSPARENT_PHOTOS, CORRECT_NUMBER_OF_CHANNELS_PER_PIXEL, \
INDEX_OF_THE_NUMBER_OF_CHANNELS_PER_PIXEL, INDEX_OF_THE_VALUE_OF_ALPHA_CHANNEL, PARTS_OF_THE_FACE_WITH_THE_CUT_FIELD
class ProcessUserPhoto:
def __init__(self, input_photo, part_of_face, face_id):
self._input_photo = input_photo
self._part_of_face = part_of_face
self._face_id = face_id
self._photo_in_base64 = None
self._src_rgb_array = None
self._dst_rgb_array = None
self._src_endpoints = None
self._dst_endpoints = None
self._transparent_pixels = []
self._number_of_detected_faces = None
self._more_or_less_than_one_photo = None
@staticmethod
def prepare_params_to_face_swapping(part_of_face,
landmarks):
"""
:param part_of_face: a specific part of the face
:type part_of_face: string - str
:param landmarks: characteristic points for the specific parts of a face.
The landmarks should comes from calling one of the functions
included in the 'LANDMARKS_FUNCTIONS' dictionary (from the file '..settings').
:type landmarks: dictionary - {}
:return: dictionary with the keys: 'polygon' and 'cut_field'
:rtype: dictionary - {}
"""
endpoints = {}
if part_of_face.lower() in map(str.lower, PARTS_OF_THE_FACE_WITH_THE_CUT_FIELD):
endpoints["polygon"] = landmarks["four_endpoints"]
endpoints["cut_field"] = landmarks["six_endpoints"]
else:
endpoints["polygon"] = landmarks
endpoints["cut_field"] = None
return endpoints
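    # Illustrative sketch of the returned structure (values are hypothetical): for a part of
    # the face listed in PARTS_OF_THE_FACE_WITH_THE_CUT_FIELD the method returns
    #     {"polygon": landmarks["four_endpoints"], "cut_field": landmarks["six_endpoints"]}
    # and for any other part it returns
    #     {"polygon": landmarks, "cut_field": None}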
@staticmethod
def more_or_less_than_one_face_info(number_of_detected_faces,
json_format=True):
"""
:param number_of_detected_faces: number of detected faces in an image
:type number_of_detected_faces: integer - int
:param json_format: param indicates if returned dictionary should be converted into a JSON object
:type json_format: bool (True or False)
:return dictionary where the number of detected faces is assigned to the key named 'number_of_detected_faces'
and False (bool) is assigned to the key named 'face_detected_successfully'.
:rtype dictionary - {} or dictionary converted into a JSON object (type - django.http.response.JsonResponse)
It depends on the parameter 'json_format'.
"""
data = MESSAGES_REGARDING_MORE_OR_LESS_THAN_ONE_FACE
data["number_of_detected_faces"] = number_of_detected_faces
if json_format:
return JsonResponse(data)
return data
@staticmethod
def processed_img_info(swapped_part_of_face,
json_format=True):
"""
This function converts 'swapped_part_of_face' to Base64 and returns a dictionary with data.
The dictionary may be converted into a JSON object.
        :param swapped_part_of_face: an RGB image converted into a numpy array (the array has the following shape (y, x, 3))
The image shows a face with a part from a different face.
:type swapped_part_of_face: numpy.ndarray (https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html)
:param json_format: param indicates if returned dictionary should be converted into a JSON object
:type json_format: bool (True or False)
:return dictionary: {
"face_detected_successfully": True,
"number_of_detected_faces": 1,
"img_src": 'swapped_part_of_face' converted to Base64
}
:rtype dictionary - {} or dictionary converted into a JSON object (type - django.http.response.JsonResponse)
It depends on the parameter 'json_format'.
"""
data = MESSAGES_REGARDING_EXACTLY_ONE_FACE
data["img_src"] = convert_img_to_base64(img=swapped_part_of_face)
if json_format:
return JsonResponse(data)
return data
@staticmethod
def prepare_endpoints_from_db(face_landmarks,
part_of_face,
json_format=True):
"""
:param face_landmarks: characteristic points for specific parts of a face
:type face_landmarks: dictionary - {} or dictionary converted into a JSON object
(it depends on the parameter 'json_format')
:param part_of_face: a specific part of the face whose data will be searched in the dictionary(face_landmarks)
:type part_of_face: string - str
:param json_format: param indicates if the passed dictionary('face_landmarks') was converted into a JSON object
:type json_format: bool (True or False)
:return result of calling the function 'prepare_params_to_face_swapping' from this class.
(dictionary with the keys: 'polygon' and 'cut_field')
:rtype dictionary - {}
"""
if json_format:
face_landmarks = json.loads(face_landmarks)[part_of_face]
return ProcessUserPhoto.prepare_params_to_face_swapping(part_of_face=part_of_face,
landmarks=face_landmarks)
@staticmethod
def get_landmarks_of_parts_of_face(face_landmarks):
"""
:param face_landmarks: landmarks of a single face generated
by the function 'face_landmarks' from the module named 'face_recognition'
(link to the module named 'face_recognition' - https://pypi.org/project/face_recognition/)
:type face_landmarks: dictionary - {}
:return characteristic points for the specific parts of a face.
Each part of the face contained in the 'LANDMARKS_FUNCTIONS' dictionary
has a function generating the characteristic points for given part of the face.
:rtype dictionary - {}
"""
landmarks_of_parts_of_face = {}
for part_of_face in LANDMARKS_FUNCTIONS:
landmarks_of_parts_of_face[part_of_face] = LANDMARKS_FUNCTIONS[part_of_face](face_landmarks)
return landmarks_of_parts_of_face
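    # Illustrative sketch (the exact keys depend on the LANDMARKS_FUNCTIONS dictionary defined
    # in '..settings'): the returned value maps each configured part of the face to the output
    # of its landmarks function, e.g. {"nose": <landmarks>, "mouth": <landmarks>, ...}.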
@staticmethod
def prepare_transparent_pixels(rgba_array):
"""
        This function looks for the pixels whose alpha channel value is less than
        the value of 'MINIMUM_VALUE_OF_THE_ALPHA_CHANNEL'.
        :param rgba_array: an RGBA image converted into a numpy array (the array has the following shape (y, x, 4))
:type rgba_array: numpy.ndarray (https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html)
:return: list of dictionaries. Each of these dictionaries has the following keys: 'row_idx', 'column_idx', 'value'.
                 The key 'row_idx' represents the index of a pixel's row (integer - int).
                 The key 'column_idx' represents the index of a pixel's column (integer - int).
                 The key 'value' represents the RGBA color of a pixel (list which contains four integers).
:rtype: list - []
"""
if rgba_array.shape[INDEX_OF_THE_NUMBER_OF_CHANNELS_PER_PIXEL] != CORRECT_NUMBER_OF_CHANNELS_PER_PIXEL:
            error_info = "The passed image has an incorrect number of channels per pixel. " \
"The correct number is equal to {correct_number_of_channels_per_pixel}.".format(
correct_number_of_channels_per_pixel=CORRECT_NUMBER_OF_CHANNELS_PER_PIXEL)
raise ValueError(error_info)
transparent_pixels = []
rows, cols, _ = np.where(
rgba_array[:, :, [INDEX_OF_THE_VALUE_OF_ALPHA_CHANNEL]] < MINIMUM_VALUE_OF_THE_ALPHA_CHANNEL)
for i in range(len(rows)):
row_idx = int(rows[i])
column_idx = int(cols[i])
pixel_value = rgba_array[row_idx][column_idx].tolist()
pixel_dictionary = {"row_idx": row_idx,
"column_idx": column_idx,
"value": pixel_value}
transparent_pixels.append(pixel_dictionary)
return transparent_pixels
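    # Minimal usage sketch (assuming `rgba` is an RGBA numpy array of shape (y, x, 4)):
    #     transparent = ProcessUserPhoto.prepare_transparent_pixels(rgba_array=rgba)
    # Each entry of the returned list has the form
    #     {"row_idx": 12, "column_idx": 34, "value": [255, 255, 255, 0]}
    # where the concrete numbers shown here are purely illustrative.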
@staticmethod
def add_transparent_pixels_to_an_rgb_image(rgb_array,
transparent_pixels):
"""
This function converts 'rgb_array' into an RGBA numpy array.
Then the pixels included in the passed list ('transparent_pixels') will be placed in this array.
        :param rgb_array: an RGB image converted into a numpy array (the array has the following shape (y, x, 3))
:type rgb_array: numpy.ndarray (https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html)
:param transparent_pixels: list of dictionaries.
Each of these dictionaries has the following keys: 'row_idx', 'column_idx', 'value'.
                                   The key 'row_idx' represents the index of a pixel's row (integer - int).
                                   The key 'column_idx' represents the index of a pixel's column (integer - int).
                                   The key 'value' represents the RGBA color of a pixel (list which contains four integers).
                                   This parameter should come from calling
                                   the function 'prepare_transparent_pixels' contained in this class.
        :type transparent_pixels: list - []
        :return: 'rgb_array' converted into an RGBA numpy array.
                 The array possesses the values of the pixels included in the passed list ('transparent_pixels').
:rtype: numpy.ndarray (https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html)
"""
pil = Image.fromarray(rgb_array)
pil_rgba = set_mode_of_pil(pil=pil, mode=PIL_MODE_OF_TRANSPARENT_PHOTOS)
rgba_array = np.array(pil_rgba, dtype=np.uint8)
for pixel in transparent_pixels:
for channel_idx in range(len(pixel["value"])):
rgba_array[pixel["row_idx"]][pixel["column_idx"]][channel_idx] = pixel["value"][channel_idx]
return rgba_array
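    # Round-trip sketch (array names are hypothetical): the pixels extracted from the original
    # RGBA image can be re-applied after the face swap, e.g.
    #     pixels = ProcessUserPhoto.prepare_transparent_pixels(rgba_array=original_rgba)
    #     restored = ProcessUserPhoto.add_transparent_pixels_to_an_rgb_image(
    #         rgb_array=swapped_rgb, transparent_pixels=pixels)
    # so that transparency lost in the RGB conversion is restored in the output image.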
def _process_existing_image(self):
"""
        This function gathers the parameters needed to swap the selected part of the face.
        In this case the user image has already been saved in our database.
        If the number of detected faces in the image is not equal to 1, the variable
        'self._more_or_less_than_one_photo' will be set to 'True'; otherwise
        the variables 'self._src_rgb_array', 'self._src_endpoints',
        'self._dst_rgb_array' and 'self._dst_endpoints'
        will be given appropriate values and 'self._more_or_less_than_one_photo' will be set to 'False'.
"""
photo_from_db = DBFunc.get_user_photo_data(photo_in_base64=self._photo_in_base64)
self._number_of_detected_faces = photo_from_db.number_of_detected_faces
if self._number_of_detected_faces != 1:
self._more_or_less_than_one_photo = True
else:
self._more_or_less_than_one_photo = False
self._dst_rgb_array = convert_text_to_rgb_array(text=photo_from_db.rgb_array)
src_face = DBFunc.get_example_photo_data(part_of_face=self._part_of_face, row_id=self._face_id)
self._src_endpoints = ProcessUserPhoto.prepare_endpoints_from_db(face_landmarks=src_face.face_landmarks,
part_of_face=self._part_of_face)
self._src_rgb_array = convert_text_to_rgb_array(text=src_face.rgb_array)
self._dst_endpoints = ProcessUserPhoto.prepare_endpoints_from_db(
face_landmarks=photo_from_db.face_landmarks,
part_of_face=self._part_of_face)
self._transparent_pixels = json.loads(photo_from_db.transparent_pixels)
def _save_info_on_a_new_image(self,
faces_landmarks):
"""
        This function saves information about a new image into our database.
:param faces_landmarks: landmarks of a single face generated
by the function 'face_landmarks' from the module named 'face_recognition'.
(link to the module named 'face_recognition' - https://pypi.org/project/face_recognition/)
        :type faces_landmarks: dictionary - {}
"""
landmarks = ProcessUserPhoto.get_landmarks_of_parts_of_face(face_landmarks=faces_landmarks)
DBFunc.save_user_photo(photo_in_base64=self._photo_in_base64,
number_of_detected_faces=self._number_of_detected_faces,
rgb_array=convert_rgb_array_to_text(rgb_array=self._dst_rgb_array),
transparent_pixels=json.dumps(self._transparent_pixels),
face_landmarks=json.dumps(landmarks))
def _process_new_image(self):
"""
        This function gathers the parameters needed to swap the selected part of the face.
        In this case the user image is a completely new one.
        If the number of detected faces in the image is not equal to 1, the variable
        'self._more_or_less_than_one_photo' will be set to 'True' and
        information about this image will be saved into our database.
        Otherwise, the variables 'self._src_rgb_array', 'self._src_endpoints',
        'self._dst_rgb_array' and 'self._dst_endpoints'
        will be given appropriate values, 'self._more_or_less_than_one_photo' will be set to 'False'
        and information about this image will be saved into our database.
"""
dst_img_pil = convert_base64_to_pil(photo_in_base64=self._photo_in_base64)
if not correct_size(img=dst_img_pil):
dst_img_pil = resize_img(img=dst_img_pil)
if dst_img_pil.mode != DEFAULT_PIL_MODE:
dst_rgba_array = np.array(set_mode_of_pil(pil=dst_img_pil, mode=PIL_MODE_OF_TRANSPARENT_PHOTOS),
dtype=np.uint8)
self._transparent_pixels = ProcessUserPhoto.prepare_transparent_pixels(rgba_array=dst_rgba_array)
dst_img_pil = set_mode_of_pil(pil=dst_img_pil, mode=DEFAULT_PIL_MODE)
self._dst_rgb_array = np.array(dst_img_pil, dtype=np.uint8)
faces_landmarks = get_faces_landmarks(rgb_array=self._dst_rgb_array)
self._number_of_detected_faces = len(faces_landmarks)
if self._number_of_detected_faces != 1:
DBFunc.save_user_photo(photo_in_base64=self._photo_in_base64,
number_of_detected_faces=self._number_of_detected_faces)
self._more_or_less_than_one_photo = True
else:
landmarks_of_the_part_of_face = LANDMARKS_FUNCTIONS[self._part_of_face](faces_landmarks[0])
src_face = DBFunc.get_example_photo_data(part_of_face=self._part_of_face, row_id=self._face_id)
self._src_endpoints = ProcessUserPhoto.prepare_endpoints_from_db(face_landmarks=src_face.face_landmarks,
part_of_face=self._part_of_face)
self._dst_endpoints = ProcessUserPhoto.prepare_params_to_face_swapping(part_of_face=self._part_of_face,
landmarks=landmarks_of_the_part_of_face)
self._src_rgb_array = convert_text_to_rgb_array(text=src_face.rgb_array)
self._more_or_less_than_one_photo = False
self._save_info_on_a_new_image(faces_landmarks=faces_landmarks[0])
def _swap_part_of_face(self):
"""
:return result of calling the function 'change_face_element' from the class 'ChangeFaceElement'
(The class is located in 'apps.face_element_swapping.change_faces').
import codecs
import json
import os
from typing import Dict, Tuple, List
from nltk.tokenize import word_tokenize
def load_tokens_from_factrueval2016_by_paragraphs(text_file_name: str, tokens_file_name: str) -> \
Tuple[Dict[int, Tuple[int, int, str]], str, tuple]:
source_text = ''
start_pos = 0
tokens_and_their_bounds = dict()
line_idx = 1
bounds_of_paragraphs = []
texts_of_paragraphs = []
with codecs.open(text_file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
texts_of_paragraphs.append(prep_line.lower())
cur_line = fp.readline()
paragraph_idx = 0
paragraph_pos = 0
with codecs.open(tokens_file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
err_msg = 'File `{0}`: line {1} is wrong!'.format(tokens_file_name, line_idx)
parts_of_line = prep_line.split()
if len(parts_of_line) != 4:
raise ValueError(err_msg)
try:
token_id = int(parts_of_line[0])
except:
token_id = -1
if token_id < 0:
raise ValueError(err_msg)
try:
token_start = int(parts_of_line[1])
except:
token_start = -1
if token_start < len(source_text):
raise ValueError(err_msg)
try:
token_len = int(parts_of_line[2])
except:
token_len = -1
if token_len < 0:
raise ValueError(err_msg)
token_text = parts_of_line[3].strip()
if len(token_text) != token_len:
raise ValueError(err_msg)
if token_id in tokens_and_their_bounds:
raise ValueError(err_msg)
while len(source_text) < token_start:
source_text += ' '
source_text += token_text
tokens_and_their_bounds[token_id] = (
token_start, token_start + token_len,
token_text
)
found_idx_in_paragraph = texts_of_paragraphs[paragraph_idx][paragraph_pos:].find(token_text.lower())
if found_idx_in_paragraph < 0:
paragraph_idx += 1
paragraph_pos = 0
while paragraph_idx < len(texts_of_paragraphs):
if len(bounds_of_paragraphs) == 0:
bounds_of_paragraphs.append((0, start_pos))
else:
bounds_of_paragraphs.append((bounds_of_paragraphs[-1][1], start_pos))
found_idx_in_paragraph = texts_of_paragraphs[paragraph_idx].find(token_text.lower())
if found_idx_in_paragraph >= 0:
break
paragraph_idx += 1
if paragraph_idx >= len(texts_of_paragraphs):
raise ValueError(err_msg)
else:
paragraph_pos += (found_idx_in_paragraph + len(token_text))
start_pos = len(source_text)
cur_line = fp.readline()
line_idx += 1
if len(texts_of_paragraphs) > 0:
if len(bounds_of_paragraphs) > 0:
bounds_of_paragraphs.append((bounds_of_paragraphs[-1][1], start_pos))
else:
bounds_of_paragraphs.append((0, start_pos))
bounds_of_paragraphs_after_strip = []
for cur_bounds in bounds_of_paragraphs:
if cur_bounds[0] < cur_bounds[1]:
source_paragraph_text = source_text[cur_bounds[0]:cur_bounds[1]]
paragraph_text_after_strip = source_paragraph_text.strip()
found_idx = source_paragraph_text.find(paragraph_text_after_strip)
if found_idx > 0:
paragraph_start = cur_bounds[0] + found_idx
else:
paragraph_start = cur_bounds[0]
paragraph_end = paragraph_start + len(paragraph_text_after_strip)
bounds_of_paragraphs_after_strip.append((paragraph_start, paragraph_end))
else:
bounds_of_paragraphs_after_strip.append(cur_bounds)
return tokens_and_their_bounds, source_text, tuple(bounds_of_paragraphs_after_strip)
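# Usage sketch (the file names are hypothetical; the loader expects the '<base>.txt' and
# '<base>.tokens' files shipped with FactRuEval-2016):
#     tokens, text, paragraphs = load_tokens_from_factrueval2016_by_paragraphs(
#         'devset/book_100.txt', 'devset/book_100.tokens')
# `tokens` maps a token ID to a (start, end, text) tuple, `text` is the source text
# reconstructed from the token bounds, and `paragraphs` holds the (start, end) character
# bounds of each stripped paragraph.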
def load_tokens_from_factrueval2016_by_sentences(tokens_file_name: str) -> \
Tuple[Dict[int, Tuple[int, int, str]], str, tuple]:
source_text = ''
tokens_and_their_bounds = dict()
line_idx = 1
bounds_of_sentences = []
sentence_start = -1
sentence_end = -1
with codecs.open(tokens_file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
err_msg = 'File `{0}`: line {1} is wrong!'.format(tokens_file_name, line_idx)
parts_of_line = prep_line.split()
if len(parts_of_line) != 4:
raise ValueError(err_msg)
try:
token_id = int(parts_of_line[0])
except:
token_id = -1
if token_id < 0:
raise ValueError(err_msg)
try:
token_start = int(parts_of_line[1])
except:
token_start = -1
if token_start < len(source_text):
raise ValueError(err_msg)
try:
token_len = int(parts_of_line[2])
except:
token_len = -1
if token_len < 0:
raise ValueError(err_msg)
token_text = parts_of_line[3].strip()
if len(token_text) != token_len:
raise ValueError(err_msg)
if token_id in tokens_and_their_bounds:
raise ValueError(err_msg)
while len(source_text) < token_start:
source_text += ' '
source_text += token_text
tokens_and_their_bounds[token_id] = (
token_start, token_start + token_len,
token_text
)
if sentence_start < 0:
sentence_start = token_start
sentence_end = token_start + token_len
else:
if (sentence_start >= 0) and (sentence_end >= 0):
bounds_of_sentences.append((sentence_start, sentence_end))
sentence_start = -1
sentence_end = -1
cur_line = fp.readline()
line_idx += 1
if (sentence_start >= 0) and (sentence_end >= 0):
bounds_of_sentences.append((sentence_start, sentence_end))
return tokens_and_their_bounds, source_text, tuple(bounds_of_sentences)
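# Usage sketch (analogous to the paragraph-based loader above; the file name is hypothetical):
#     tokens, text, sentences = load_tokens_from_factrueval2016_by_sentences('devset/book_100.tokens')
# Blank lines in the '.tokens' file separate sentences, which is how the sentence bounds
# returned as the third element are determined.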
def load_spans_from_factrueval2016(spans_file_name: str,
tokens_dict: Dict[int, Tuple[int, int, str]]) -> Dict[int, List[int]]:
spans = dict()
line_idx = 1
with codecs.open(spans_file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
err_msg = 'File `{0}`: line {1} is wrong!'.format(spans_file_name, line_idx)
parts_of_line = prep_line.split()
if len(parts_of_line) < 9:
raise ValueError(err_msg)
try:
span_id = int(parts_of_line[0])
except:
span_id = -1
if span_id < 0:
raise ValueError(err_msg)
if span_id not in spans:
try:
found_idx = parts_of_line.index('#')
except:
found_idx = -1
if found_idx < 0:
raise ValueError(err_msg)
if (len(parts_of_line) - 1 - found_idx) < 2:
raise ValueError(err_msg)
if (len(parts_of_line) - 1 - found_idx) % 2 != 0:
raise ValueError(err_msg)
n = (len(parts_of_line) - 1 - found_idx) // 2
token_IDs = []
try:
for idx in range(found_idx + 1, found_idx + n + 1):
new_token_ID = int(parts_of_line[idx])
if new_token_ID in token_IDs:
token_IDs = []
break
if new_token_ID not in tokens_dict:
token_IDs = []
break
token_IDs.append(new_token_ID)
if token_IDs[-1] < 0:
token_IDs = []
break
except:
token_IDs = []
if len(token_IDs) == 0:
raise ValueError(err_msg)
spans[span_id] = token_IDs
del token_IDs
cur_line = fp.readline()
line_idx += 1
return spans
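# Sketch of a '.spans' line as this parser sees it (the field names after the span ID are a
# best guess at the FactRuEval-2016 layout and are not consumed below):
#     <span_id> <type> <start> <length> <token_start> <n_tokens> # <token IDs...> <token texts...>
# Only the span ID and the first half of the fields after the '#' marker (the token IDs) are
# used; the remainder of the line is merely checked for well-formedness.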
def load_objects_from_factrueval2016(objects_file_name: str,
spans_dict: Dict[int, List[int]]) -> Dict[int, Tuple[str, List[int]]]:
objects = dict()
line_idx = 1
with codecs.open(objects_file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
err_msg = 'File `{0}`: line {1} is wrong!'.format(objects_file_name, line_idx)
parts_of_line = prep_line.split()
if len(parts_of_line) < 5:
raise ValueError(err_msg)
try:
object_id = int(parts_of_line[0])
if object_id in objects:
object_id = -1
except:
object_id = -1
if object_id < 0:
raise ValueError(err_msg)
ne_type = parts_of_line[1].upper()
if ne_type in {'PERSON', 'LOCATION', 'ORG', 'LOCORG'}:
if ne_type == 'LOCORG':
ne_type = 'LOCATION'
try:
found_idx = parts_of_line.index('#')
except:
found_idx = -1
if found_idx < 3:
raise ValueError(err_msg)
span_IDs = []
try:
for idx in range(2, found_idx):
new_span_ID = int(parts_of_line[idx])
if new_span_ID < 0:
span_IDs = []
break
if new_span_ID not in spans_dict:
span_IDs = []
break
if new_span_ID in span_IDs:
span_IDs = []
break
span_IDs.append(new_span_ID)
except:
span_IDs = []
if len(span_IDs) == 0:
raise ValueError(err_msg)
objects[object_id] = (ne_type, span_IDs)
del span_IDs
cur_line = fp.readline()
line_idx += 1
return objects
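# Sketch of an '.objects' line as parsed above (field values are illustrative):
#     <object_id> <ne_type> <span_id> [<span_id> ...] # <object text>
# Only objects typed PERSON, LOCATION, ORG or LOCORG are kept, and LOCORG is folded into
# LOCATION before the (type, span IDs) pair is stored.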
def check_factrueval_tokenization(src_dir_name: str, split_by_paragraphs: bool):
factrueval_files = dict()
for cur_file_name in os.listdir(src_dir_name):
if cur_file_name.endswith('.objects'):
base_name = cur_file_name[:-len('.objects')]
elif cur_file_name.endswith('.spans'):
base_name = cur_file_name[:-len('.spans')]
elif cur_file_name.endswith('.tokens'):
base_name = cur_file_name[:-len('.tokens')]
else:
base_name = None
if base_name is not None:
if base_name in factrueval_files:
assert cur_file_name not in factrueval_files[base_name]
factrueval_files[base_name].append(cur_file_name)
else:
factrueval_files[base_name] = [cur_file_name]
for base_name in factrueval_files:
if len(factrueval_files[base_name]) != 3:
raise ValueError('Files list for `{0}` is wrong!'.format(base_name))
text_file_name = os.path.join(src_dir_name, base_name + '.txt')
if not os.path.isfile(text_file_name):
raise ValueError('File `{0}` does not exist!'.format(text_file_name))
factrueval_files[base_name].append(text_file_name)
factrueval_files[base_name] = sorted(factrueval_files[base_name])
n_good = 0
n_total = 0
for base_name in sorted(list(factrueval_files.keys())):
if split_by_paragraphs:
tokens, text, paragraphs = load_tokens_from_factrueval2016_by_paragraphs(
os.path.join(src_dir_name, base_name + '.txt'), os.path.join(src_dir_name, base_name + '.tokens')
)
else:
tokens, text, paragraphs = load_tokens_from_factrueval2016_by_sentences(
os.path.join(src_dir_name, base_name + '.tokens')
)
tokens_by_tokenizer = []
for paragraph_start, paragraph_end in paragraphs:
tokens_by_tokenizer += word_tokenize(text[paragraph_start:paragraph_end])
tokens_by_factrueval = []
for token_id in sorted(list(tokens.keys())):
tokens_by_factrueval.append(tokens[token_id][2])
tokens_by_tokenizer = tuple(tokens_by_tokenizer)
tokens_by_factrueval = tuple(tokens_by_factrueval)
if tokens_by_tokenizer == tokens_by_factrueval:
print('')
print('{0}'.format(base_name))
print('All right!')
print('')
n_good += 1
else:
print('')
print('{0}'.format(base_name))
print('')
print('true tokens:')
print('{0}'.format(tokens_by_factrueval))
print('')
print('calculated tokens:')
print('{0}'.format(tokens_by_tokenizer))
print('')
n_total += 1
print('')
print('Total number of texts is {0}.'.format(n_total))
print('Number of correctly tokenized texts is {0}.'.format(n_good))
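# Usage sketch (the directory name is hypothetical): compare NLTK's word_tokenize against the
# gold FactRuEval-2016 tokenization for every document in a source directory:
#     check_factrueval_tokenization('factRuEval-2016/devset', split_by_paragraphs=True)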
def factrueval2016_to_json(src_dir_name: str, dst_json_name: str, split_by_paragraphs: bool=True):
factrueval_files = dict()
for cur_file_name in os.listdir(src_dir_name):
if cur_file_name.endswith('.objects'):
base_name = cur_file_name[:-len('.objects')]
elif cur_file_name.endswith('.spans'):
base_name = cur_file_name[:-len('.spans')]
elif cur_file_name.endswith('.tokens'):
base_name = cur_file_name[:-len('.tokens')]
else:
base_name = None
if base_name is not None:
if base_name in factrueval_files:
assert cur_file_name not in factrueval_files[base_name]
factrueval_files[base_name].append(cur_file_name)
else:
factrueval_files[base_name] = [cur_file_name]
for base_name in factrueval_files:
if len(factrueval_files[base_name]) != 3:
raise ValueError('Files list for `{0}` is wrong!'.format(base_name))
text_file_name = os.path.join(src_dir_name, base_name + '.txt')
if not os.path.isfile(text_file_name):
raise ValueError('File `{0}` does not exist!'.format(text_file_name))
factrueval_files[base_name].append(text_file_name)
factrueval_files[base_name] = sorted(factrueval_files[base_name])
train_data = []
for base_name in sorted(list(factrueval_files.keys())):
if split_by_paragraphs:
tokens, text, paragraphs = load_tokens_from_factrueval2016_by_paragraphs(
os.path.join(src_dir_name, base_name + '.txt'), os.path.join(src_dir_name, base_name + '.tokens')
)
else:
tokens, text, paragraphs = load_tokens_from_factrueval2016_by_sentences(
os.path.join(src_dir_name, base_name + '.tokens')
)
spans = load_spans_from_factrueval2016(os.path.join(src_dir_name, base_name + '.spans'), tokens)
objects = load_objects_from_factrueval2016(os.path.join(src_dir_name, base_name + '.objects'), spans)
named_entities = dict()
if len(objects) > 0:
for object_ID in objects:
ne_type = objects[object_ID][0]
tokens_of_ne = set()
spans_of_ne = objects[object_ID][1]
for span_ID in spans_of_ne:
tokens_of_ne |= set(spans[span_ID])
tokens_of_ne = sorted(list(tokens_of_ne))
if len(tokens_of_ne) > 0:
token_ID = tokens_of_ne[0]
ne_start = tokens[token_ID][0]
ne_end = tokens[token_ID][1]
for token_ID in tokens_of_ne[1:]:
if tokens[token_ID][0] < ne_start:
ne_start = tokens[token_ID][0]
if tokens[token_ID][1] > ne_end:
ne_end = tokens[token_ID][1]
if ne_type in named_entities:
named_entities[ne_type].append((ne_start, ne_end))
else:
named_entities[ne_type] = [(ne_start, ne_end)]
train_data.append({'text': text, 'named_entities': named_entities, 'paragraph_bounds': paragraphs,
'base_name': base_name})
with codecs.open(dst_json_name, mode='w', encoding='utf-8', errors='ignore') as fp:
json.dump(train_data, fp, indent=4, ensure_ascii=False)
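# Usage sketch (paths are hypothetical):
#     factrueval2016_to_json('factRuEval-2016/devset', 'factrueval_devset.json')
# The resulting JSON file holds a list of documents, each with 'text', 'named_entities',
# 'paragraph_bounds' and 'base_name' keys, as assembled above.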
def recognized_factrueval2016_to_json(gold_dir_name: str, recognized_dir_name: str, dst_json_name: str):
factrueval_files = dict()
for cur_file_name in os.listdir(gold_dir_name):
if cur_file_name.endswith('.objects'):
            base_name = cur_file_name[:-len('.objects')]
    @property
    def oranges_r(self):
cname = "oranges_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
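    # Usage sketch (the class exposing these properties is not shown in this excerpt, so
    # `cmaps` below is assumed to be an instance of it):
    #     import matplotlib.pyplot as plt
    #     plt.imshow(data, cmap=cmaps.oranges_r)
    # Each property lazily reads its RGB table, registers the colormap with matplotlib under
    # `cname`, and on later calls returns the already-registered Colormap via get_cmap.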
@property
def oranges_3(self):
cname = "oranges_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_3_r(self):
cname = "oranges_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_4(self):
cname = "oranges_4"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_4_r(self):
cname = "oranges_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_5(self):
cname = "oranges_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_5_r(self):
cname = "oranges_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_6(self):
cname = "oranges_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_6_r(self):
cname = "oranges_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_7(self):
cname = "oranges_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_7_r(self):
cname = "oranges_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_8(self):
cname = "oranges_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_8_r(self):
cname = "oranges_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_9(self):
cname = "oranges_9"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_9.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def oranges_9_r(self):
cname = "oranges_9_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "oranges_9.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd(self):
cname = "orrd"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_r(self):
cname = "orrd_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_3(self):
cname = "orrd_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_3_r(self):
cname = "orrd_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_4(self):
cname = "orrd_4"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_4_r(self):
cname = "orrd_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_5(self):
cname = "orrd_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_5_r(self):
cname = "orrd_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_6(self):
cname = "orrd_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_6_r(self):
cname = "orrd_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_7(self):
cname = "orrd_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_7_r(self):
cname = "orrd_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_8(self):
cname = "orrd_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_8_r(self):
cname = "orrd_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_9(self):
cname = "orrd_9"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_9.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def orrd_9_r(self):
cname = "orrd_9_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "orrd_9.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired(self):
cname = "paired"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_r(self):
cname = "paired_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_10(self):
cname = "paired_10"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_10.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_10_r(self):
cname = "paired_10_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_10.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_11(self):
cname = "paired_11"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_11.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_11_r(self):
cname = "paired_11_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_11.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_12(self):
cname = "paired_12"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_12.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_12_r(self):
cname = "paired_12_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_12.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_3(self):
cname = "paired_3"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_3.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_3_r(self):
cname = "paired_3_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_3.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_4(self):
cname = "paired_4"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_4.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_4_r(self):
cname = "paired_4_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_4.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_5(self):
cname = "paired_5"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_5.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_5_r(self):
cname = "paired_5_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_5.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_6(self):
cname = "paired_6"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_6.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_6_r(self):
cname = "paired_6_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_6.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_7(self):
cname = "paired_7"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_7.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_7_r(self):
cname = "paired_7_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_7.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_8(self):
cname = "paired_8"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_8.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_8_r(self):
cname = "paired_8_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_8.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def paired_9(self):
cname = "paired_9"
        if cname in matplotlib.cm._cmap_registry:
            return matplotlib.cm.get_cmap(cname)
        cmap_file = os.path.join(CMAPSFILE_DIR, "colorbrewer", "paired_9.rgb")
        cmap = Colormap(self._coltbl(cmap_file), name=cname)
        matplotlib.cm.register_cmap(name=cname, cmap=cmap)
        return cmap
"""Base destructors and destructor mixins."""
from __future__ import division, print_function
import logging
import warnings
from abc import abstractmethod
from builtins import super
from copy import deepcopy
from functools import wraps
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
# noinspection PyProtectedMember
from .utils import (_INF_SPACE, _UNIT_SPACE, check_X_in_interval, get_domain_or_default,
get_support_or_default)
logger = logging.getLogger(__name__)
class ScoreMixin(object):
"""Mixin for :func:`score` that returns mean of :func:`score_samples`."""
def score(self, X, y=None):
"""Return the mean log likelihood (or log(det(Jacobian))).
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples and n_features
is the number of features.
y : None, default=None
Not used but kept for compatibility.
Returns
-------
log_likelihood : float
            Mean log likelihood of the data points in X.
"""
return np.mean(self.score_samples(X, y))
class DestructorMixin(ScoreMixin, TransformerMixin):
"""Mixin helper class to add universal destructor methods.
Adds ``sample``, ``get_domain``, and ``score`` *if* the destructor
defines the ``density_`` attribute after fitting. (Also, if the
destructor defines the attribute ``n_features_``, no sampling is
required to determine the number of features, see note below.)
    Note that this finds the data dimension by looking sequentially for
    the fitted ``n_features_`` attribute, the ``density_.n_features_``
    attribute, and finally attempting to call ``self.density_.sample(1)``
    and determining the dimension from the density sample.
"""
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from this density/destructor.
Parameters
----------
n_samples : int, default=1
Number of samples to generate. Defaults to 1.
random_state : int, RandomState instance or None, optional (default=None)
If int, `random_state` is the seed used by the random number
generator; If :class:`~numpy.random.RandomState` instance,
`random_state` is the random number generator; If None, the random
number generator is the :class:`~numpy.random.RandomState` instance
used by :mod:`numpy.random`.
Returns
-------
X : array, shape (n_samples, n_features)
Randomly generated sample.
"""
rng = check_random_state(random_state)
U = rng.rand(n_samples, self._get_n_features())
X = self.inverse_transform(U)
return X
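    # Sampling sketch: `sample` draws uniform noise on the unit hypercube and pushes it
    # through the learned inverse transform, i.e. (assuming `d` is a fitted destructor):
    #     U = np.random.rand(10, d._get_n_features())
    #     X = d.inverse_transform(U)
    # which mirrors the method above, with the RandomState handled by check_random_state.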
# Utility method to attempt to automatically determine the number of dimensions.
def _get_n_features(self):
return get_n_features(self)
def get_n_features(destructor, try_destructor_sample=False):
"""Get the number of features for a fitted destructor.
Attempt to find ``n_features`` either from
``destructor.n_features_``, ``destructor.density_.n_features_``,
or via density sampling ``destructor.density_.sample(1,
random_state=0).shape[1]``.
Parameters
----------
destructor : estimator
The (fitted) destructor from which to extract the number of features.
try_destructor_sample : bool, optional, default=False
If ``True``, additionally attempt ``destructor.sample(1,
        random_state=0).shape[1]``. This option could cause infinite
recursion since :class:`~ddl.base.DestructorMixin` uses
:func:`get_n_features` in order to sample but this can be avoided if
the destructor reimplements sample without :func:`get_n_features`
such as in the :class:`ddl.base.CompositeDestructor`.
"""
n_features = np.nan
if hasattr(destructor, 'n_features_'):
n_features = destructor.n_features_
elif hasattr(destructor, 'density_') and hasattr(destructor.density_, 'n_features_'):
n_features = destructor.density_.n_features_
elif hasattr(destructor, 'density_') and hasattr(destructor.density_, 'sample'):
warnings.warn('Because `destructor.n_features_` does not exist and'
' `destructor.density_.n_features_` does not exist'
' we attempt to determine the dimension by sampling'
' from destructor.density_, which may be computationally'
' demanding. Add destructor.n_features_ to reduce time if necessary.',
_NumDimWarning)
n_features = np.array(destructor.density_.sample(n_samples=1, random_state=0)).shape[1]
else:
if try_destructor_sample:
# Attempt to sample from destructor
if hasattr(destructor, 'sample'):
try:
n_features = np.array(
destructor.sample(n_samples=1, random_state=0)
).shape[1]
except RuntimeError:
err = True
else:
err = False
else:
err = True
if err:
raise RuntimeError(
'Could not find n_features in destructor.n_features_, '
'destructor.density_.n_features_, '
'destructor.density_.sample(1).shape[1], or destructor.sample('
'1).shape[1]. '
)
else:
            raise RuntimeError('Could not find n_features in destructor or density. '
                               'Checked destructor.n_features_, destructor.density_.n_features_, '
                               'and attempted to sample from destructor.density_ to determine'
                               ' n_features but failed in all cases.')
return n_features
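# Usage sketch (assuming `fitted` is a destructor that has already been fit):
#     n_features = get_n_features(fitted)
# The lookup order is: `fitted.n_features_`, then `fitted.density_.n_features_`, then a single
# sample drawn from `fitted.density_`, and finally (only when try_destructor_sample=True) a
# single sample drawn from the destructor itself.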
class BoundaryWarning(DataConversionWarning):
"""Warning that data is on the boundary of the required set.
Warning when data is on the boundary of the domain or range and
is converted to data that lies inside the boundary. For example, if
the domain is (0,inf) rather than [0,inf), values of 0 will be made
a small epsilon above 0.
"""
class _NumDimWarning(UserWarning):
"""Warning about the number of dimensions.
Warning that we have to use 1 sample in order to determine the
    number of dimensions. (Because ``trans.n_features_`` does not exist and
    ``trans.density_.n_features_`` does not exist, we attempt to determine the
dimension by sampling from self.density_, which may be
computationally demanding. Add self.n_features_ to reduce time if
necessary.)
"""
class BaseDensityDestructor(BaseEstimator, DestructorMixin):
"""Abstract destructor derived from an explicit underlying density.
This should be used if the destructor is based on an *explicit*
underlying density such as a ``TreeDestructor`` or
    ``IndependentDestructor``.
The only methods that need to be implemented in this case are
``get_density_estimator``, ``transform`` and ``inverse_transform``.
Attributes
----------
density_ : estimator
Fitted underlying density.
"""
@abstractmethod
def _get_density_estimator(self):
"""(Abstract) Get density estimator."""
raise NotImplementedError()
@abstractmethod
def transform(self, X, y=None):
"""[Placeholder].
Parameters
----------
X :
y :
"""
raise NotImplementedError()
@abstractmethod
def inverse_transform(self, X, y=None):
"""[Placeholder].
Parameters
----------
X :
y :
"""
raise NotImplementedError()
def fit(self, X, y=None, density_fit_params=None):
"""[Placeholder].
Parameters
----------
X :
y :
density_fit_params :
Returns
-------
obj : object
"""
if density_fit_params is None:
density_fit_params = {}
density = clone(self._get_density_estimator()).fit(X, y, **density_fit_params)
self.fit_from_density(density)
return self
def fit_from_density(self, density):
"""[Placeholder].
Parameters
----------
density :
Returns
-------
obj : object
"""
self.density_ = density
return self
def score_samples(self, X, y=None):
"""Compute log-likelihood (or log(det(Jacobian))) for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples and n_features
is the number of features.
y : None, default=None
Not used but kept for compatibility.
Returns
-------
log_likelihood : array, shape (n_samples,)
Log likelihood of each data point in X.
"""
self._check_is_fitted()
X = check_array(X, ensure_min_samples=0)
X = check_X_in_interval(X, get_domain_or_default(self))
return self.density_.score_samples(X)
def get_domain(self):
"""Get the domain of this destructor.
Returns
-------
domain : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``domain[0]`` is the minimum and
``domain[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's domain (which could
be different for each feature) is given similar to the first
case.
"""
# Either get from the density estimator parameter
# or fitted density attribute
try:
self._check_is_fitted()
except NotFittedError:
return get_support_or_default(self._get_density_estimator())
else:
return get_support_or_default(self.density_)
def _check_is_fitted(self):
check_is_fitted(self, ['density_'])
class IdentityDestructor(BaseDensityDestructor):
"""Identity destructor/transform.
This assumes a canonical uniform density on the unit hypercube and
has a domain of [0, 1].
Attributes
----------
density_ : estimator
Fitted underlying density.
See Also
--------
UniformDensity
"""
@classmethod
def create_fitted(cls, n_features):
destructor = cls()
destructor.density_ = UniformDensity.create_fitted(n_features)
return destructor
def _get_density_estimator(self):
"""Get the *unfitted* density associated with this destructor.
NOTE: The returned estimator is NOT fitted but is a clone or new
instantiation of the underlying density estimator. This is just
a helper function that needs to be overridden by subclasses of
:class:`~ddl.base.BaseDensityDestructor`.
Returns
-------
density : estimator
            The *unfitted* density estimator associated with this
destructor.
"""
return UniformDensity()
def transform(self, X, y=None, copy=True):
"""[Placeholder].
Parameters
----------
X :
y :
copy :
Returns
-------
obj : object
"""
self._check_is_fitted()
X = check_array(X, ensure_min_samples=0)
X = check_X_in_interval(X, get_domain_or_default(self))
self._check_dim(X)
if copy:
X = X.copy()
return X
def inverse_transform(self, X, y=None, copy=True):
"""[Placeholder].
Parameters
----------
X :
y :
copy :
Returns
-------
obj : object
"""
self._check_is_fitted()
X = check_array(X, ensure_min_samples=0)
X = check_X_in_interval(X, np.array([0, 1]))
self._check_dim(X)
if copy:
X = X.copy()
return X
def get_domain(self):
"""Get the domain of this destructor.
Returns
-------
domain : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``domain[0]`` is the minimum and
``domain[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's domain (which could
be different for each feature) is given similar to the first
case.
"""
return np.array([0, 1])
def _check_dim(self, X):
if X.shape[1] != self.density_.n_features_:
raise ValueError('Dimension of input does not match dimension of the original '
'training data.')
class UniformDensity(BaseEstimator, ScoreMixin):
"""Uniform density estimator.
Only the ``n_features_`` attribute needs fitting. This nearly
trivial density is used as the underlying density for the
``IdentityDestructor``.
Attributes
----------
n_features_ : int
Number of features of the training data.
See Also
--------
IdentityDestructor
"""
def __init__(self):
pass
def fit(self, X, y=None):
"""Fit estimator to X.
Parameters
----------
| |
Only attributes that are assignable to
this type are returned.
inherit: Specifies whether to search this member's inheritance chain to find the
attributes.
Returns: An array of custom attributes applied to this member, or an array with zero (0)
elements if no attributes have been applied.
"""
pass
def GetDefaultMembers(self):
"""
GetDefaultMembers(self: _Type) -> Array[MemberInfo]
Provides COM objects with version-independent access to the
System.Type.GetDefaultMembers method.
Returns: An array of System.Reflection.MemberInfo objects representing all default
members of the current System.Type.-or- An empty array of type
System.Reflection.MemberInfo, if the current System.Type does not have default
members.
"""
pass
def GetElementType(self):
"""
GetElementType(self: _Type) -> Type
Provides COM objects with version-independent access to the
System.Type.GetElementType method.
Returns: The System.Type of the object encompassed or referred to by the current array,
pointer or reference type.-or- null if the current System.Type is not an array
or a pointer, or is not passed by reference, or represents a generic type or a
type parameter of a generic type or method definition.
"""
pass
def GetEvent(self, name, bindingAttr=None):
"""
GetEvent(self: _Type, name: str) -> EventInfo
Provides COM objects with version-independent access to the
System.Type.GetEvent(System.String) method.
            name: The System.String containing the name of an event that is declared or inherited
                by the current System.Type.
            Returns: The System.Reflection.EventInfo object representing the specified event that is
                declared or inherited by the current System.Type, if found; otherwise, null.
GetEvent(self: _Type, name: str, bindingAttr: BindingFlags) -> EventInfo
Provides COM objects with version-independent access to the
System.Type.GetEvent(System.String,System.Reflection.BindingFlags) method.
name: The System.String containing the name of an event that is declared or inherited
by the current System.Type.
bindingAttr: A bitmask comprised of one or more System.Reflection.BindingFlags that specify
how the search is conducted.-or- Zero, to return null.
Returns: The System.Reflection.EventInfo object representing the specified event that is
declared or inherited by the current System.Type, if found; otherwise, null.
"""
pass
def GetEvents(self, bindingAttr=None):
"""
GetEvents(self: _Type, bindingAttr: BindingFlags) -> Array[EventInfo]
Provides COM objects with version-independent access to the
System.Type.GetEvents(System.Reflection.BindingFlags) method.
bindingAttr: A bitmask comprised of one or more System.Reflection.BindingFlags that specify
how the search is conducted.-or- Zero, to return null.
Returns: An array of System.Reflection.EventInfo objects representing all events that
are declared or inherited by the current System.Type that match the specified
binding constraints.-or- An empty array of type System.Reflection.EventInfo, if
the current System.Type does not have events, or if none of the events match
the binding constraints.
GetEvents(self: _Type) -> Array[EventInfo]
Provides COM objects with version-independent access to the
System.Type.GetEvents method.
Returns: An array of System.Reflection.EventInfo objects representing all the public
events that are declared or inherited by the current System.Type.-or- An empty
array of type System.Reflection.EventInfo, if the current System.Type does not
have public events.
"""
pass
def GetField(self, name, bindingAttr=None):
"""
GetField(self: _Type, name: str) -> FieldInfo
Provides COM objects with version-independent access to the
System.Type.GetField(System.String) method.
name: The System.String containing the name of the data field to get.
Returns: A System.Reflection.FieldInfo object representing the public field with the
specified name, if found; otherwise, null.
GetField(self: _Type, name: str, bindingAttr: BindingFlags) -> FieldInfo
Provides COM objects with version-independent access to the
System.Type.GetField(System.String,System.Reflection.BindingFlags) method.
name: The System.String containing the name of the data field to get.
bindingAttr: A bitmask comprised of one or more System.Reflection.BindingFlags that specify
how the search is conducted.-or- Zero, to return null.
Returns: A System.Reflection.FieldInfo object representing the field that matches the
specified requirements, if found; otherwise, null.
"""
pass
def GetFields(self, bindingAttr=None):
"""
GetFields(self: _Type) -> Array[FieldInfo]
Provides COM objects with version-independent access to the
System.Type.GetFields method.
Returns: An array of System.Reflection.FieldInfo objects representing all the public
fields defined for the current System.Type.-or- An empty array of type
System.Reflection.FieldInfo, if no public fields are defined for the current
System.Type.
GetFields(self: _Type, bindingAttr: BindingFlags) -> Array[FieldInfo]
Provides COM objects with version-independent access to the
System.Type.GetFields(System.Reflection.BindingFlags) method.
bindingAttr: A bitmask comprised of one or more System.Reflection.BindingFlags that specify
how the search is conducted.-or- Zero, to return null.
Returns: An array of System.Reflection.FieldInfo objects representing all fields defined
for the current System.Type that match the specified binding constraints.-or-
An empty array of type System.Reflection.FieldInfo, if no fields are defined
for the current System.Type, or if none of the defined fields match the binding
constraints.
"""
pass
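# Illustrative sketch (not part of the generated stubs): how GetField and
# GetFields are typically called through this COM-visible interface, e.g.
# from IronPython. The type, field name and BindingFlags combination are
# assumptions chosen only to show the call shape.
#
#   from System import Type
#   from System.Reflection import BindingFlags
#
#   t = Type.GetType("System.String")
#   flags = BindingFlags.Public | BindingFlags.Static
#   empty_field = t.GetField("Empty", flags)   # single field lookup
#   all_fields = t.GetFields(flags)            # every field matching the flags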
def GetHashCode(self):
"""
GetHashCode(self: _Type) -> int
Provides COM objects with version-independent access to the
System.Type.GetHashCode method.
Returns: A System.Int32 containing the hash code for this instance.
"""
pass
def GetIDsOfNames(self, riid, rgszNames, cNames, lcid, rgDispId):
"""
GetIDsOfNames(self: _Type, riid: Guid, rgszNames: IntPtr, cNames: UInt32, lcid: UInt32, rgDispId: IntPtr) -> Guid
Maps a set of names to a corresponding set of dispatch identifiers.
riid: Reserved for future use. Must be IID_NULL.
rgszNames: Passed-in array of names to be mapped.
cNames: Count of the names to be mapped.
lcid: The locale context in which to interpret the names.
rgDispId: Caller-allocated array that receives the IDs corresponding to the names.
"""
pass
def GetInterface(self, name, ignoreCase=None):
"""
GetInterface(self: _Type, name: str) -> Type
Provides COM objects with version-independent access to the
System.Type.GetInterface(System.String) method.
name: The System.String containing the name of the interface to get. For generic
interfaces, this is the mangled name.
Returns: A System.Type object representing the interface with the specified name,
implemented or inherited by the current System.Type, if found; otherwise, null.
GetInterface(self: _Type, name: str, ignoreCase: bool) -> Type
Provides COM objects with version-independent access to the
System.Type.GetInterface(System.String,System.Boolean) method.
name: The System.String containing the name of the interface to get. For generic
interfaces, this is the mangled name.
ignoreCase: true to perform a case-insensitive search for name.-or- false to perform a
case-sensitive search for name.
Returns: A System.Type object representing the interface with the specified name,
implemented or inherited by the current System.Type, if found; otherwise, null.
"""
pass
def GetInterfaceMap(self, interfaceType):
"""
GetInterfaceMap(self: _Type, interfaceType: Type) -> InterfaceMapping
Provides COM objects with version-independent access to the
System.Type.GetInterfaceMap(System.Type) method.
interfaceType: The System.Type of the interface of which to retrieve a mapping.
Returns: An System.Reflection.InterfaceMapping object representing the interface mapping
for interfaceType.
"""
pass
def GetInterfaces(self):
"""
GetInterfaces(self: _Type) -> Array[Type]
Provides COM objects with version-independent access to the
System.Type.GetInterfaces method.
Returns: An array of System.Type objects representing all the interfaces implemented or
inherited by the current System.Type.-or- An empty array of type System.Type,
if no interfaces are implemented or inherited by the current System.Type.
"""
pass
def GetMember(self, name, *__args):
"""
GetMember(self: _Type, name: str) -> Array[MemberInfo]
Provides COM objects with version-independent access to the
System.Type.GetMember(System.String) method.
name: The System.String containing the name of the public members to get.
Returns: An array of System.Reflection.MemberInfo objects representing the public
members with the specified name, if found; otherwise, an empty array.
GetMember(self: _Type, name: str, bindingAttr: BindingFlags) -> Array[MemberInfo]
Provides COM objects with version-independent access to the
System.Type.GetMember(System.String,System.Reflection.BindingFlags) method.
name: The System.String containing the name of the members to get.
| |
"""
Implementation of the method proposed in the paper:
'Adversarial Attacks on Graph Neural Networks via Meta Learning'
by <NAME>, <NAME>
Published at ICLR 2019 in New Orleans, USA.
Copyright (C) 2019
<NAME>
Technical University of Munich
"""
import tensorflow.compat.v1 as tf
import numpy as np
from metattack import utils
import scipy.sparse as sp
from tensorflow.keras.initializers import glorot_uniform
tf.disable_v2_behavior()
try:
from tqdm import tqdm
except ImportError:
tqdm = lambda x, desc=None: x
class GNNAttack:
"""
Base class for attacks on GNNs.
"""
def __init__(self, adjacency_matrix, attribute_matrix, labels_onehot, hidden_sizes, train_iters=100, gpu_id=None,
attack_features=False, dtype=tf.float32):
"""
Parameters
----------
adjacency_matrix: np.array [N,N]
Unweighted, symmetric adjacency matrix where N is the number of nodes.
attribute_matrix: sp.spmatrix or np.array [N,D]
Attribute matrix where D is the number of attributes per node.
labels_onehot: np.array [N,K]
One-hot matrix of class labels, where N is the number of nodes. Labels of the unlabeled nodes should come
from self-training using only the labels of the labeled nodes.
hidden_sizes: list of ints
List that defines the number of hidden units per hidden layer. Input and output layers not included.
train_iters: int
The number of 'inner' training steps of the GCN
gpu_id: int or None
GPU to use. None means CPU-only
attack_features: bool
Whether to also attack the node attributes (in addition to the graph structure).
"""
self.N, self.D = attribute_matrix.shape
self.K = labels_onehot.shape[1]
self.hidden_sizes = hidden_sizes
self.graph = tf.Graph()
self.train_iters = train_iters
self.dtype = dtype
with self.graph.as_default():
self.labels_onehot = labels_onehot
self.idx_labeled = tf.placeholder(dtype=tf.int32, shape=[None, ], name="Labeled_Idx")
self.idx_unlabeled = tf.placeholder(dtype=tf.int32, shape=[None, ], name="Unlabeled_Idx")
self.idx_attack = tf.placeholder(dtype=tf.int32, shape=[None, ], name="Attack_Idx")
self.attack_features = attack_features
if sp.issparse(adjacency_matrix):
adjacency_matrix = adjacency_matrix.toarray()
assert np.allclose(adjacency_matrix, adjacency_matrix.T)
self.sparse_attributes = sp.issparse(attribute_matrix)
if attack_features:
if self.sparse_attributes:
attrs_unique = np.unique(attribute_matrix.toarray())
# convert attributes to dense to make them attackable
attribute_matrix = attribute_matrix.toarray()
self.sparse_attributes = False
else:
attrs_unique = np.unique(attribute_matrix)
if len(attrs_unique) > 2 or not np.allclose(attrs_unique, [0, 1]):
raise ValueError("Attacks on the node features are currently only supported for binary attributes.")
w_init = glorot_uniform
weights = []
biases = []
velocities = []
bias_velocities = []
previous_size = self.D
for ix, layer_size in enumerate(self.hidden_sizes):
weight = tf.get_variable(f"W_{ix + 1}", shape=[previous_size, layer_size], dtype=self.dtype,
initializer=w_init())
bias = tf.get_variable(f"b_{ix + 1}", shape=[layer_size], dtype=self.dtype,
initializer=w_init())
w_velocity = tf.Variable(np.zeros(weight.shape), dtype=self.dtype, name=f"Velocity_{ix + 1}")
b_velocity = tf.Variable(np.zeros(bias.shape), dtype=self.dtype, name=f"b_Velocity_{ix + 1}")
weights.append(weight)
velocities.append(w_velocity)
bias_velocities.append(b_velocity)
biases.append(bias)
previous_size = layer_size
output_weight = tf.get_variable(f"W_{len(self.hidden_sizes) + 1}", shape=[previous_size, self.K],
dtype=self.dtype,
initializer=w_init())
output_bias = tf.get_variable(f"b_{len(self.hidden_sizes) + 1}", shape=[self.K], dtype=self.dtype,
initializer=w_init())
output_velocity = tf.Variable(np.zeros(output_weight.shape), dtype=self.dtype,
name=f"Velocity_{len(self.hidden_sizes) + 1}")
output_bias_velocity = tf.Variable(np.zeros(output_bias.shape), dtype=self.dtype,
name=f"b_Velocity_{len(self.hidden_sizes) + 1}")
weights.append(output_weight)
velocities.append(output_velocity)
biases.append(output_bias)
bias_velocities.append(output_bias_velocity)
with tf.name_scope("input"):
self.adjacency_orig = tf.constant(adjacency_matrix, dtype=self.dtype, name="Adjacency")
# The variable storing the changes to the adjacency matrix. Shape [N*N]
self.adjacency_changes = tf.Variable(np.zeros(adjacency_matrix.size), dtype=self.dtype,
name="Adjacency_delta")
# reshape to [N, N] and set the diagonal to 0
tf_adjacency_square = tf.matrix_set_diag(tf.reshape(self.adjacency_changes, adjacency_matrix.shape),
tf.zeros(adjacency_matrix.shape[0], dtype=self.dtype))
# Symmetrize and clip to [-1,1]
tf_adjacency_delta_symm = tf.clip_by_value(tf_adjacency_square + tf.transpose(tf_adjacency_square), -1,
1)
self.modified_adjacency = self.adjacency_orig + tf_adjacency_delta_symm
adj_selfloops = tf.add(self.modified_adjacency, tf.diag(tf.ones([self.N], dtype=self.dtype)))
inv_degrees = tf.pow(tf.reduce_sum(adj_selfloops, axis=0), -0.5)
self.adj_norm = tf.multiply(tf.multiply(adj_selfloops, inv_degrees[:, None]),
inv_degrees[None, :], name="normalized_adjacency")
if attack_features:
self.attributes_orig = tf.constant(attribute_matrix, name="Original_attributes",
dtype=self.dtype)
self.attribute_changes = tf.Variable(np.zeros(attribute_matrix.size), dtype=self.dtype)
tf_attributes_reshaped = tf.reshape(tf.clip_by_value(self.attribute_changes, 0, 1),
attribute_matrix.shape)
self.attributes = tf.clip_by_value(self.attributes_orig + tf_attributes_reshaped, 0, 1,
name="Modified_attributes")
else:
if self.sparse_attributes:
self.attributes = tf.SparseTensor(np.array(attribute_matrix.nonzero()).T,
attribute_matrix[attribute_matrix.nonzero()].A1,
attribute_matrix.shape)
self.attributes = tf.cast(self.attributes, dtype=dtype, name="Attributes_sparse")
else:
self.attributes = tf.constant(attribute_matrix, name="Attribute_matrix", dtype=self.dtype)
self.all_weights = [[w for w in weights]]
self.all_biases = [[b for b in biases]]
self.all_velocities = [[w for w in velocities]]
self.all_velocities_bias = [[w for w in bias_velocities]]
if gpu_id is None:
config = tf.ConfigProto(
device_count={'GPU': 0}
)
else:
gpu_options = tf.GPUOptions(visible_device_list='{}'.format(gpu_id), allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
session = tf.Session(config=config)
self.session = session
def filter_potential_singletons(self):
"""
Computes a mask for entries potentially leading to singleton nodes, i.e. entries where one of the
two corresponding nodes has degree 1 and there is an edge between the two nodes.
Returns
-------
tf.Tensor shape [N, N], float with ones everywhere except the entries of potential singleton nodes,
where the returned tensor has value 0.
"""
degrees = tf.reduce_sum(self.modified_adjacency, axis=0)
degree_one = tf.equal(degrees, 1, name="degree_equals_one")
resh = tf.reshape(tf.tile(degree_one, [self.N]), [self.N, self.N], name="degree_one_square")
l_and = tf.logical_and(resh, tf.equal(self.modified_adjacency, 1))
logical_and_symmetric = tf.logical_or(l_and, tf.transpose(l_and))
flat_mask = tf.cast(tf.logical_not(tf.reshape(logical_and_symmetric, [-1])), self.dtype)
return flat_mask
def log_likelihood_constraint(self, ll_cutoff):
"""
Computes a mask for entries whose corresponding edge, if added or removed, would cause the
log likelihood constraint to be violated.
Parameters
----------
ll_cutoff: float
Cutoff value for the unnoticeability constraint. Smaller means stricter constraint. 0.004 corresponds to a
p-value of 0.95 in the Chi-square distribution with one degree of freedom.
Returns
-------
allowed_mask: tf.Tensor shape [N, N], dtype float
ones everywhere except the entries that, if an edge is added/removed, would violate the log likelihood
constraint. There, the returned tensor has value 0.
current_ratio: tf.Tensor, scalar, dtype float
current value of the Chi-square test.
"""
t_d_min = tf.constant(2, dtype=self.dtype)
t_possible_edges = tf.constant(np.array(np.triu(np.ones((self.N, self.N)), k=1).nonzero()).T,
dtype=tf.uint16)
allowed_mask, current_ratio = utils.likelihood_ratio_filter(t_possible_edges,
self.modified_adjacency,
self.adjacency_orig, t_d_min,
ll_cutoff)
return allowed_mask, current_ratio
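# Illustrative sketch (not part of the original implementation): the intended
# calling sequence for the attack class defined below. The variable names and
# hyperparameter values are assumptions for illustration only.
#
#   attacker = GNNMetaApprox(adj, features, labels_onehot, hidden_sizes=[16],
#                            train_iters=100, gpu_id=None, _lambda=0.5)
#   attacker.build(with_relu=False, learning_rate=1e-2)
#   attacker.make_loss(ll_constraint=True, ll_cutoff=0.004)
#   # afterwards, the (approximate) meta-gradient updates are evaluated in
#   # attacker.session to select one adjacency perturbation per attack step.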
class GNNMetaApprox(GNNAttack):
"""
Class for attacking GNNs with approximate meta gradients.
"""
def __init__(self, adjacency_matrix, attribute_matrix, labels_onehot, hidden_sizes, train_iters=100, gpu_id=None,
_lambda=0.5, dtype=tf.float32):
"""
Parameters
----------
adjacency_matrix: np.array [N,N]
Unweighted, symmetric adjacency matrix where N is the number of nodes.
attribute_matrix: sp.spmatrix or np.array [N,D]
Attribute matrix where D is the number of attributes per node.
labels_onehot: np.array [N,K]
One-hot matrix of class labels, where N is the number of nodes. Labels of the unlabeled nodes should come
from self-training using only the labels of the labeled nodes.
hidden_sizes: list of ints
List that defines the number of hidden units per hidden layer. Input and output layers not included.
train_iters: int
The number of 'inner' training steps of the GCN
gpu_id: int or None
GPU to use. None means CPU-only
_lambda: float between 0 and 1 (inclusive)
Weighting of the gradients of the losses of the labeled and unlabeled nodes. _lambda=1 corresponds to only
considering the loss on the labeled nodes, _lambda=0 only unlabeled nodes.
"""
super().__init__(adjacency_matrix, attribute_matrix, labels_onehot, hidden_sizes, train_iters, gpu_id,
False, dtype)
self.lambda_ = _lambda
self.logits = None
self.classification_loss = None
self.optimizer = None
self.train_op = None
self.grad_sum = None
self.adjacency_grad = None
self.grad_sum_add = None
self.grad_sum_mod = None
self.adjacency_update = None
self.ll_ratio = None
def build(self, with_relu=False, learning_rate=1e-2, with_bias=False):
"""
Construct the model and create the weight variables.
Parameters
----------
with_relu: bool
Whether to use the ReLU activation in the hidden layers
learning_rate: float
Learning rate for training.
with_bias: bool
Whether to use the bias terms during the attack.
"""
with self.graph.as_default():
weights = self.all_weights[-1]
bias = self.all_biases[-1]
hidden = self.attributes
for ix, w in enumerate(weights):
b = bias[ix]*float(with_bias)
if ix == 0 and self.sparse_attributes:
if self.dtype != tf.float32: # sparse matmul is unfortunately not implemented for float16
hidden = self.adj_norm @ tf.cast(tf.sparse_tensor_dense_matmul(tf.cast(hidden, tf.float32),
tf.cast(w, tf.float32)),
self.dtype) + b
else:
hidden = self.adj_norm @ tf.sparse_tensor_dense_matmul(hidden, w) + b
else:
hidden = self.adj_norm @ hidden @ w + b
if with_relu:
hidden = tf.nn.relu(hidden)
self.logits = hidden
labels_gather = tf.gather(self.labels_onehot, self.idx_labeled)
logits_gather = tf.gather(self.logits, self.idx_labeled)
self.classification_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_gather,
logits=logits_gather))
epsilon = 1e-8
if self.dtype == tf.float16:
epsilon = 1e-4 # improve numerical stability for half precision
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)
self.train_op = self.optimizer.minimize(self.classification_loss, var_list=[*self.all_weights[0],
*self.all_biases[0]])
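# The loop in build() above follows the standard GCN propagation rule,
# sketched here for reference (notation only, not executable code):
#
#   A_hat = D^{-1/2} (A + I) D^{-1/2}            # self.adj_norm from __init__
#   H_0   = X                                    # node attributes
#   H_l   = act( A_hat @ H_{l-1} @ W_l + b_l )   # one step per weight matrix
#
# where act is ReLU when with_relu=True and the identity otherwise, and the
# final H gives self.logits.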
def make_loss(self, ll_constraint=True, ll_cutoff=0.004):
"""
Construct the update of the adjacency matrix based on the (approximate) meta gradients.
Parameters
----------
ll_constraint: bool
Whether to enforce the unnoticeability constraint on the degree distribution.
ll_cutoff: float
Cutoff value for the unnoticeability constraint. Smaller means stricter constraint. 0.004 corresponds to a
p-value of 0.95 in the Chi-square distribution with one degree of freedom.
"""
with self.graph.as_default():
logits_labeled = tf.gather(self.logits, self.idx_labeled)
labels_train = tf.gather(self.labels_onehot, self.idx_labeled)
logits_unlabeled = tf.gather(self.logits, self.idx_unlabeled)
labels_selftrain = tf.gather(self.labels_onehot, self.idx_unlabeled)
loss_labeled = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_labeled,
labels=labels_train))
loss_unlabeled = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits_unlabeled,
labels=labels_selftrain))
if self.lambda_ == 1:
attack_loss = loss_labeled
elif self.lambda_ == 0:
attack_loss = loss_unlabeled
else:
attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled
# This variable "stores" the gradients of every inner training step.
self.grad_sum = tf.Variable(np.zeros(self.N * self.N), dtype=self.dtype)
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
"""
LED glasses mappings
"""
# Maps to link IS31FL3741 LEDs to pixels
# Full LED glasses 18 x 5 matrix
glassesmatrix_ledmap = (
65535,
65535,
65535, # (0,0) (clipped, corner)
10,
8,
9, # (0,1) / right ring pixel 20
13,
11,
12, # (0,2) / 19
16,
14,
15, # (0,3) / 18
4,
2,
3, # (0,4) / 17
217,
215,
216, # (1,0) / right ring pixel #21
220,
218,
219, # (1,1)
223,
221,
222, # (1,2)
226,
224,
225, # (1,3)
214,
212,
213, # (1,4)
187,
185,
186, # (2,0)
190,
188,
189, # (2,1)
193,
191,
192, # (2,2)
196,
194,
195, # (2,3)
184,
182,
183, # (2,4)
37,
35,
36, # (3,0)
40,
38,
39, # (3,1)
43,
41,
42, # (3,2)
46,
44,
45, # (3,3)
34,
32,
33, # (3,4)
67,
65,
66, # (4,0)
70,
68,
69, # (4,1)
73,
71,
72, # (4,2)
76,
74,
75, # (4,3)
64,
62,
63, # (4,4)
97,
95,
96, # (5,0)
100,
98,
99, # (5,1)
103,
101,
102, # (5,2)
106,
104,
105, # (5,3)
94,
92,
93, # (5,4)
127,
125,
126, # (6,0) / right ring pixel 3
130,
128,
129, # (6,1)
133,
131,
132, # (6,2)
136,
134,
135, # (6,3)
124,
122,
123, # (6,4)
157,
155,
156, # (7,0)
160,
158,
159, # (7,1)
163,
161,
162, # (7,2) / right ring pixel 5
166,
164,
165, # (7,3) / 6
244,
242,
243, # (7,4) / 7
247,
245,
246, # (8,0)
250,
248,
249, # (8,1)
253,
251,
252, # (8,2)
256,
254,
255, # (8,3)
65535,
65535,
65535, # (8,4) (clipped, nose bridge)
345,
347,
346, # (9,0)
342,
344,
343, # (9,1)
267,
269,
268, # (9,2)
263,
265,
264, # (9,3)
65535,
65535,
65535, # (9,4) (clipped, nose bridge)
336,
338,
337, # (10,0)
333,
335,
334, # (10,1)
237,
239,
238, # (10,2) / left ring pixel 19
233,
235,
234, # (10,3) / 18
348,
262,
349, # (10,4) / 17
327,
329,
328, # (11,0) / left ring pixel 21
324,
326,
325, # (11,1)
207,
209,
208, # (11,2)
203,
205,
204, # (11,3)
330,
202,
331, # (11,4)
318,
320,
319, # (12,0)
315,
317,
316, # (12,1)
177,
179,
178, # (12,2)
173,
175,
174, # (12,3)
321,
172,
322, # (12,4)
309,
311,
310, # (13,0)
306,
308,
307, # (13,1)
147,
149,
148, # (13,2)
143,
145,
144, # (13,3)
312,
142,
313, # (13,4)
300,
302,
301, # (14,0)
297,
299,
298, # (14,1)
117,
119,
118, # (14,2)
113,
115,
114, # (14,3)
303,
112,
304, # (14,4)
291,
293,
292, # (15,0)
288,
290,
289, # (15,1)
87,
89,
88, # (15,2)
83,
85,
84, # (15,3)
294,
82,
295, # (15,4)
282,
284,
283, # (16,0) / left ring pixel 3
279,
281,
280, # (16,1)
57,
59,
58, # (16,2)
53,
55,
54, # (16,3)
285,
52,
286, # (16,4)
65535,
65535,
65535, # (17,0) (clipped, corner)
270,
272,
271, # (17,1) / left ring pixel 4
27,
29,
28, # (17,2) / 5
23,
25,
24, # (17,3) / 6
276,
22,
277, # (17,4) / 7
)
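# Minimal helper (illustrative sketch, not part of the upstream mapping data):
# according to the per-pixel comments above, each (x, y) pixel of the 18 x 5
# matrix occupies three consecutive entries (one per color channel) starting at
# offset (x * 5 + y) * 3, with 65535 marking clipped positions. The function
# name and the exact channel ordering are assumptions for illustration only.
def glassesmatrix_indices(x, y, ledmap=glassesmatrix_ledmap):
    """Return the three LED indices for matrix pixel (x, y), or None if clipped."""
    offset = (x * 5 + y) * 3
    triple = ledmap[offset : offset + 3]
    if 65535 in triple:
        return None
    return triple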
# LED glasses 18 x 5 matrix but excluding LEDs shared with the eye rings
glassesmatrix_ledmap_no_ring = (
65535,
65535,
65535, # (0,0) (clipped, corner)
65535,
65535,
65535, # (0,1) / right ring pixel 20
65535,
65535,
65535, # (0,2) / 19
65535,
65535,
65535, # (0,3) / 18
65535,
65535,
65535, # (0,4) / 17
65535,
65535,
65535, # (1,0) / right ring pixel #21
220,
218,
219, # (1,1)
223,
221,
222, # (1,2)
226,
224,
225, # (1,3)
214,
212,
213, # (1,4)
187,
185,
186, # (2,0)
190,
188,
189, # (2,1)
193,
191,
192, # (2,2)
196,
194,
195, # (2,3)
184,
182,
183, # (2,4)
37,
35,
36, # (3,0)
40,
38,
39, # (3,1)
43,
41,
42, # (3,2)
46,
44,
45, # (3,3)
34,
32,
33, # (3,4)
67,
65,
66, # (4,0)
70,
68,
69, # (4,1)
73,
71,
72, # (4,2)
76,
74,
75, # (4,3)
64,
62,
63, # (4,4)
97,
95,
96, # (5,0)
100,
98,
99, # (5,1)
103,
101,
102, # (5,2)
106,
104,
105, # (5,3)
94,
92,
93, # (5,4)
127,
125,
126, # (6,0) / right ring pixel 3
130,
128,
129, # (6,1)
133,
131,
132, # (6,2)
136,
134,
135, # (6,3)
124,
122,
123, # (6,4)
157,
155,
156, # (7,0)
160,
158,
159, # (7,1)
163,
161,
162, # (7,2) / right ring pixel 5
166,
164,
165, # (7,3) / 6
244,
242,
243, # (7,4) / 7
247,
245,
246, # (8,0)
250,
248,
249, # (8,1)
253,
251,
252, # (8,2)
256,
254,
255, # (8,3)
65535,
65535,
65535, # (8,4) (clipped, nose bridge)
345,
347,
346, # (9,0)
342,
344,
343, # (9,1)
267,
269,
268, # (9,2)
263,
265,
264, # (9,3)
65535,
65535,
65535, # (9,4) (clipped, nose bridge)
336,
338,
337, # (10,0)
333,
335,
334, # (10,1)
237,
239,
238, # (10,2) / left ring pixel 19
233,
235,
234, # (10,3) / 18
348,
262,
349, # (10,4) / 17
327,
329,
328, # (11,0) / left ring pixel 21
324,
326,
325, # (11,1)
207,
209,
208, # (11,2)
203,
205,
204, # (11,3)
330,
202,
331, # (11,4)
318,
320,
319, # (12,0)
315,
317,
316, # (12,1)
177,
179,
178, # (12,2)
173,
175,
174, # (12,3)
321,
172,
322, # (12,4)
309,
311,
310, # (13,0)
306,
308,
307, # (13,1)
147,
149,
148, # (13,2)
143,
145,
144, # (13,3)
312,
142,
313, # (13,4)
300,
302,
301, # (14,0)
297,
299,
298, # (14,1)
117,
119,
118, # (14,2)
113,
115,
114, # (14,3)
303,
112,
304, # (14,4)
291,
293,
292, # (15,0)
288,
290,
289, # (15,1)
87,
89,
88, # (15,2)
83,
85,
84, # (15,3)
294,
82,
295, # (15,4)
65535,
65535,
65535, # (16,0) / left ring pixel 3
279,
281,
280, # (16,1)
57,
59,
58, # (16,2)
53,
55,
54, # (16,3)
285,
52,
286, # (16,4)
65535,
65535,
65535, # (17,0) (clipped, corner)
65535,
65535,
65535, # (17,1) / left ring pixel 4
65535,
65535,
65535, # (17,2) / 5
65535,
65535,
65535, # (17,3) / 6
65535,
65535,
65535, # (17,4) / 7
)
# Left LED glasses eye ring
left_ring_map = (
341,
210,
211, # 0
332,
180,
181, # 1
323,
150,
151, # 2
127,
125,
126, # 3
154,
152,
153, # 4
163,
161,
162, # 5
166,
164,
165, # 6
244,
242,
243, # 7
259,
257,
258, # 8
169,
167,
168, # 9
139,
137,
138, # 10
109,
107,
108, # 11
79,
77,
78, # 12
49,
47,
48, # 13
199,
197,
198, # 14
229,
227,
228, # 15
19,
17,
18, # 16
4,
2,
3, # 17
16,
14,
15, # 18
13,
11,
12, # 19
10,
8,
9, # 20
217,
215,
216, # 21
7,
5,
6, # 22
350,
240,
241, # 23
)
# Left LED glasses eye ring excluding inner LEDs shared with the 18 x 5 matrix
left_ring_map_no_inner = (
341,
210,
211, # 0
332,
180,
181, # 1
323,
150,
151, # 2
65535,
65535,
65535, # 3
65535,
65535,
65535, # 4
65535,
65535,
65535, # 5
65535,
65535,
65535, # 6
65535,
65535,
65535, # 7
259,
257,
258, # 8
169,
167,
168, # 9
139,
137,
138, # 10
109,
| |
# -*- coding: utf-8 -*-
"""
Q02 from First assignment letter (c)
Backpropagation, Stochastic with Delta Rule and Momentum Term
Class Deep Learning
UFPB
Mar, 31 2018.
<NAME>
GitHub @rafaelmm
"""
####################################
# IMPORTANT THINGS HERE
#
#
####################################
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# -----------------------------------
# Dataset Generator
# -----------------------------------
def dataset_generator(n_tra, n_val=0, n_tes=0):
"""
Generates a dataset that represents the ternary message system.
@params:
n_tra - the number of training examples to be generated
n_val - the number of validation examples in dataset
n_tes - the number of test examples in dataset
returns a tuple of NumPy arrays in the format of
(training inputs, training targets, validation input, ...
validation target, test input, test targets)
"""
total = n_tra + n_val + n_tes
# Each example is a random vector of three binary (0,1) values with
# additive uniform noise in the range [-0.1, 0.1). The target is an
# eight-position one-hot encoded vector with values (-1, 1) and no noise.
allset_in = np.random.randint(2, size=(total, 3))
allset_noise = np.random.rand(total, 3) * 0.2 - 0.1
allset_target = np.full((total, 8), -1)
# convert each binary input pattern to its one-hot target encoding
for x in range(total):
# obtaining the position bin to dec
p = int(''.join(str(y) for y in allset_in[x]), 2)
allset_target[x, p] = 1
# adding noise to dataset
allset_in = np.add(allset_in, allset_noise)
# splitting the dataset into training/validation/test sets
tra_in = allset_in[0:n_tra, :]
tra_out = allset_target[0:n_tra, :]
val_in = allset_in[n_tra:(n_tra+n_val), :]
val_out = allset_target[n_tra:(n_tra+n_val), :]
tes_in = allset_in[(total-n_tes):total, :]
tes_out = allset_target[(total-n_tes):total, :]
return (tra_in, tra_out, val_in, val_out, tes_in, tes_out)
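# Example (illustrative sketch): generating 100 training, 20 validation and
# 20 test examples; the returned arrays then have shapes (100, 3)/(100, 8),
# (20, 3)/(20, 8) and (20, 3)/(20, 8) respectively.
#
#   tra_in, tra_out, val_in, val_out, tes_in, tes_out = dataset_generator(100, 20, 20)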
# -----------------------------------
# Plot Error Viewer
# -----------------------------------
def plot_error(error_vector):
"""
Function to show the progress of the error vector
"""
plt.figure()
plt.plot(range(len(error_vector)), error_vector)
plt.title('Error value evolution')
plt.xlabel('Number of Epochs')
plt.ylabel('Error value')
plt.show()
# -----------------------------------
# Weights Structure Creator
# -----------------------------------
def weights_init(net_arc):
"""
Function that initializes the weights randomly using the numpy library.
@Param: net_arc - list with the number of neurons in each layer
Returns: w - the initialized weights structure
"""
num_layers = np.shape(net_arc)[0]
max_neu = np.max(net_arc)
w = np.zeros(shape=([num_layers-1, max_neu, max_neu+1]))
for layer in range(num_layers-1):
for neuron in range(net_arc[layer+1]):
for conexion in range(net_arc[layer]+1):
w[layer][neuron][conexion] = np.random.random() - 0.5
return w
# -----------------------------------
# Activation Functions
# -----------------------------------
def activation_func(func_type, z):
"""
Implements the different kinds of activation functions, including:
line - linear function
sigm - sigmoidal
tanh - hyperbolic tangent
ptanh - smoothly scaled hyperbolic tangent
relu - Rectified Linear Unit
step - Heaviside (binary step 0 or 1)
"""
if func_type == 'line':
return z
if func_type == 'sigm':
return (1 / (1 + np.exp(-z)))
if func_type == 'tanh':
return (np.tanh(z))
if func_type == 'ptanh':
a = 1.7159
b = 2/3
return (a*np.tanh(b*z))
if func_type == 'relu':
return np.max(np.array([0, z]))
if func_type == 'step':
return (1 if z > 0.5 else 0)
# -----------------------------------
# Derivated Activation Functions
# -----------------------------------
def deriv_activation_func(func_type, z):
"""
Implements the derivatives of the different activation functions, including:
line - linear function
sigm - sigmoidal
tanh - hyperbolic tangent
ptanh - smoothly scaled hyperbolic tangent
relu - Rectified Linear Unit
step - Heaviside (binary step 0 or 1)
"""
if func_type == 'line':
return 1
if func_type == 'sigm':
return (1 / (1 + np.exp(-z))) - ((1 / (1 + np.exp(-z))) ** 2)
if func_type == 'tanh':
return (1/((np.cosh(z) ** 2)))
if func_type == 'ptanh':
a = 1.7159
b = 2/3
return (a*b*(1-(np.tanh(b*z)**2)))
if func_type == 'relu':
return (1 if z > 0 else 0)
if func_type == 'step':
return (1 if z > 0.5 else 0)
# -----------------------------------
# Activation Functions Plotter
# -----------------------------------
def visualizeActivationFunc(func_type, z):
"""
Makes a plot of the activation function with input z
"""
fz = []
for i in range(len(z)):
fz.append(activation_func(func_type, z[i]))
plt.figure()
plt.plot(z, fz)
plt.xlabel('Input')
plt.ylabel('Output Values')
plt.show()
# -----------------------------------
# Derivated Activation Functions Plotter
# -----------------------------------
def visualizeDerivActivationFunc(func_type, z):
"""
Makes a plot of the derivative of the activation function with input z
"""
fz = []
for i in range(len(z)):
fz.append(deriv_activation_func(func_type, z[i]))
plt.figure()
plt.plot(z, fz)
plt.xlabel('Input')
plt.ylabel('Output Values')
plt.show()
# -----------------------------------
# Forward Step of Neural Net
# -----------------------------------
def forward(net_arc, net_func, w, b, X):
"""
The forward pass of the MLP neural net: it computes the network output for
the input X, passing through each neuron of each layer.
"""
num_layers = np.shape(net_arc)[0]
max_neu = np.max(net_arc)
Y = np.zeros(shape=([num_layers, max_neu]))
for layer in range(num_layers):
if layer == 0:
for neuron in range(net_arc[layer]):
Y[layer, neuron] = X[neuron]
else:
for neuron in range(net_arc[layer]):
act_sum = np.dot(w[layer-1, neuron, 1:(net_arc[layer-1]+1)],
Y[layer-1, 0:net_arc[layer-1]]) + \
w[layer-1, neuron, 0]*b
Y[layer, neuron] = activation_func(net_func[layer], act_sum)
# returning the output layer, the last one
return Y[num_layers-1, 0:(net_arc[num_layers-1])]
# -----------------------------------
# Predict Limiar Output
# -----------------------------------
def predict(output):
"""
Thresholds the network output so that predictions conform to the
(-1, 1) encoding used by the targets.
"""
y_pred = [1 if x >= 0 else -1 for x in output]
return y_pred
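# -----------------------------------
# Example Usage (illustrative sketch)
# -----------------------------------
# Not part of the original assignment code: shows how the helpers above fit
# together. The architecture, activation choices and dataset sizes are
# assumptions for illustration only; the weights here are untrained.
def example_forward_pass():
    """Run a single untrained forward pass on one test example."""
    net_arc = [3, 8, 8]                   # 3 inputs, 8 hidden units, 8 outputs
    net_func = ['line', 'tanh', 'tanh']   # activation per layer (input layer unused)
    _, _, _, _, tes_in, tes_out = dataset_generator(100, 0, 10)
    w = weights_init(net_arc)
    out = forward(net_arc, net_func, w, 1, tes_in[0])
    print(predict(out), tes_out[0])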
# -----------------------------------
# Training Function Stochastic (shuffling all dataset on each epoch)
# -----------------------------------
def training_net_delta_mom(net_arc, net_func, w, b, data_in, target, learn_rate,
alfa, num_epochs, err_max=0.0001):
"""
This function executes the weight-update algorithm: it measures the
error and adjusts the w structure using the computed gradients.
@args
w - weights structure
data_in - training dataset
target - training targets of dataset
num_epochs - the total number of epochs used to adjust w
learn_rate - the learning rate that scales the changes in w
alfa - momentum term
err_max - a specified value for the maximum error accepted in training
"""
# num of layers
num_layers = np.shape(net_arc)[0]
# max num of neurons on any layer
max_neu = np.max(net_arc)
# num of examples in input dataset
num_examples = np.shape(data_in)[0]
# output size (last network layer size)
out_size = net_arc[len(net_arc)-1]
# local error for each example (or instantaneous error)
err_local = np.zeros(shape=(num_examples, 1))
# The value of output for each neuron for an especific example
Y = np.zeros(shape=([num_layers, max_neu]))
# The value of soma (the accumulated input before the activation function)
soma = np.zeros(shape=([num_layers, max_neu]))
# The local gradient (delta) for each neuron
gradi = np.zeros(shape=([num_layers, max_neu]))
# auxiliary copy of the previous weights (used by the momentum term)
oldw = np.copy(w)
# the error vector over all epochs (mean squared error)
err_vec = np.zeros((num_epochs, 1))
# Starting the training loop
for ep in range(num_epochs):
# cleaning local error and mse for each epoch
err_local = np.zeros(shape=(num_examples, 1))
ms_error = 0
# for each example
# Stochastic - shuffle in each epoch
for example in list(shuffle(range(num_examples))):
# for example in range(num_examples):
# ----------------
# 1 - Forward Step
# ----------------
for layer in range(num_layers):
if layer == 0:
for neuron in range(net_arc[layer]):
Y[layer, neuron] = data_in[example, neuron]
else:
for neuron in range(net_arc[layer]):
soma[layer, neuron] = np.dot(w[layer-1, neuron, 1:(net_arc[layer-1]+1)], Y[layer-1, 0:net_arc[layer-1]]) + w[layer-1, neuron, 0]*b
Y[layer, neuron] = activation_func(net_func[layer],
soma[layer, neuron])
# ---------------------
# 2 - Error Measurement
# ---------------------
# to calculate example squared error
err_example = np.zeros(shape=(out_size, 1))
for neuron in range(out_size):
err_example[neuron] = target[example, neuron] - Y[num_layers-1, neuron]
# err_example = err_example ** 2
err_local[example] = np.sum(err_example ** 2) / 2
# ---------------------
# 3 - Backpropagation
# ---------------------
# Just last and hidden layers (not input layer)
for layer in range(num_layers-1, 0, -1):
# if last layer
if layer == (num_layers - 1):
for neuron in range(out_size):
gradi[layer, neuron] = err_example[neuron] * deriv_activation_func(net_func[layer], soma[layer, neuron])
# bias update
deltaw = learn_rate * b * gradi[layer, neuron]
aux = w[layer-1, neuron, 0]
mom = alfa * (aux - oldw[layer-1, neuron, 0])
w[layer-1, neuron, 0] = aux + deltaw + mom
oldw[layer-1, neuron, 0] = aux
# other weights
for weight in range(net_arc[layer-1]):
deltaw = learn_rate * gradi[layer, neuron] * Y[layer-1, weight]
aux = w[layer-1, neuron, weight+1]
mom = alfa * (aux - oldw[layer-1, neuron, weight+1])
w[layer-1, neuron, weight+1] = aux + deltaw + mom
oldw[layer-1, neuron, weight+1] = aux
# if hidden layer (not last)
else:
for neuron in range(net_arc[layer]):
soma_gradi = 0
# for each neuron on step ahead layer
for kneuron in range(net_arc[layer+1]):
soma_gradi += gradi[layer+1, kneuron] * oldw[layer, kneuron, neuron+1]
gradi[layer, neuron] = soma_gradi * deriv_activation_func(net_func[layer], soma[layer, neuron])
# bias update
deltaw = learn_rate * b * gradi[layer, neuron]
| |
import sys
import os
import json
import re
# ---
re_pattern_package_fullname = r"([A-Za-z0-9_-]+)::([A-Za-z0-9_-]+)"
re_pattern_account_id = r"([0-9]+)"
re_pattern_stock_package_name = r"(abstract_rtsp_media_source|hdmi_data_sink)"
re_pattern_interface_fullname = r"([A-Za-z0-9_-]+)::([A-Za-z0-9_-]+)\.([A-Za-z0-9_-]+)"
re_pattern_edge_fullname = r"([A-Za-z0-9_-]+)\.([A-Za-z0-9_-]+)"
re_pattern_edge_parameter_node_name = r"([A-Za-z0-9_-]+)"
# ---
def load_json_file(filepath):
with open(filepath) as fd:
return json.load(fd)
# ---
class PackageBase:
pass
class JsonPackage(PackageBase):
def __init__( self, filepath ):
self.d = load_json_file(filepath)
class AbstractRtspMediaSourcePackage(PackageBase):
def __init__(self):
self.d = {
"nodePackage" : {
"envelopeVersion": "2021-01-01",
"name": "abstract_rtsp_media_source",
"version": "1.0",
"description": "",
"assets" : [
# placeholder information for stock package
{
"name": "rtsp_v1_asset",
"implementations": [
{
"type": "system",
"assetUri":"source/video/camera/rtsp/source_rtsp"
}
]
}
],
"interfaces" : [
{
"name": "rtsp_v1_interface",
"category": "media_source",
"asset": "rtsp_v1_asset",
"outputs": [
{
"name": "video_out",
"type": "media",
},
],
},
],
}
}
class HdmiDataSinkPackage(PackageBase):
def __init__(self):
self.d = {
"nodePackage" : {
"envelopeVersion": "2021-01-01",
"name": "hdmi_data_sink",
"version": "1.0",
"description": "",
"assets" : [
# placeholder information for stock package
{
"name": "hdmi0_asset",
"implementations": [
{
"type": "data_sink",
"assetUri": "",
"descriptorUri": ""
}
]
}
],
"interfaces" : [
{
"name": "hdmi0",
"category": "data_sink",
"asset": "hdmi0_asset",
"inputs": [
{
"name": "video_in",
"type": "media",
},
],
},
],
}
}
# ---
class Node:
def __init__(self):
pass
class PackagedNode(Node):
def __init__( self, interface_elm, asset_elm ):
Node.__init__(self)
self.interface_elm = interface_elm
self.asset_elm = asset_elm
def lookup_input_output( self, list_name, name ):
for elm in self.interface_elm[list_name]:
if elm["name"] == name:
return elm
interface_name = self.interface_elm["name"]
raise ValueError( f"'{name}' not found in interface '{interface_name}.{list_name}'" )
class BusinessLogicContainerNode(PackagedNode):
def __init__( self, interface_elm, asset_elm ):
PackagedNode.__init__( self, interface_elm, asset_elm )
self.inputs = {}
self.outputs = {}
def connect_producer( self, input_name, producer_node, producer_output_name ):
print( "Connecting producer", input_name, producer_node, producer_output_name )
if isinstance( producer_node, PackagedNode ):
input_elm = self.lookup_input_output( "inputs", input_name )
output_elm = producer_node.lookup_input_output( "outputs", producer_output_name )
input_type = input_elm["type"]
output_type = output_elm["type"]
if input_type != output_type:
raise ValueError( f"Interface input/output types mismatch {input_type} != {output_type}" )
self.inputs[input_name] = producer_node
def connect_consumer( self, output_name, consumer_node, consumer_input_name ):
if isinstance( consumer_node, PackagedNode ):
output_elm = self.lookup_input_output( "outputs", output_name )
input_elm = consumer_node.lookup_input_output( "inputs", consumer_input_name )
input_type = input_elm["type"]
output_type = output_elm["type"]
if input_type != output_type:
raise ValueError( f"Interface input/output types mismatch {input_type} != {output_type}" )
self.outputs[output_name] = consumer_node
class ModelNode(PackagedNode):
def __init__( self, interface_elm, asset_elm ):
PackagedNode.__init__( self, interface_elm, asset_elm )
class MediaSourceRtspCameraNode(PackagedNode):
def __init__( self, interface_elm, asset_elm ):
PackagedNode.__init__( self, interface_elm, asset_elm )
class HdmiDataSinkNode(PackagedNode):
def __init__( self, interface_elm, asset_elm ):
PackagedNode.__init__( self, interface_elm, asset_elm )
class ParameterNode(Node):
def __init__( self, node_elm ):
t = node_elm["interface"]
v = node_elm["value"]
types = {
"float32" : float,
"int32" : int,
"string" : str,
"boolean" : bool,
}
if t not in types:
raise ValueError( f"Unknown parameter type {t}" )
if not isinstance( v, types[t] ):
raise TypeError( f"Expected type is {t} but value is {type(v)}" )
self.value = v
self.node_elm = node_elm
def lookup_input_output( self, list_name, name ):
print( "self.node_elm", self.node_elm )
for elm in self.interface_elm[list_name]:
if elm["name"] == name:
return elm
interface_name = self.interface_elm["name"]
raise ValueError( f"'{name}' not found in interface '{interface_name}.{list_name}'" )
# ---
class Graph:
def __init__(self):
self.packages = {}
self.nodes = {}
self.business_logic_node = None
def load( self, app_dir_top, app_name ):
self.app_dir_top = app_dir_top
self.app_name = app_name
graph_filepath = os.path.join( app_dir_top, "graphs", app_name, "graph.json" )
print( "Loading graph:", graph_filepath )
print( "" )
graph_json = load_json_file(graph_filepath)
print( "Loading packages" )
# load dependent package JSON files, and descriptor JSON files
for package_elm in graph_json["nodeGraph"]["packages"]:
package_fullname = package_elm["name"]
package_version = package_elm["version"]
print( f"Processing {package_fullname}" )
re_result = re.match( re_pattern_package_fullname, package_fullname )
if re_result:
account_id = re_result.group(1)
package_name = re_result.group(2)
if account_id == "panorama":
if package_name=="abstract_rtsp_media_source":
self.packages[package_name] = AbstractRtspMediaSourcePackage()
elif package_name=="hdmi_data_sink":
self.packages[package_name] = HdmiDataSinkPackage()
else:
raise ValueError( f"Unsupported stock package name : {package_name}" )
else:
# FIXME : check if this matches actual account id.
self.load_package_from_json( account_id, package_name, package_version )
else:
raise ValueError( f"Package name didn't match the expected pattern : {package_fullname}" )
print( "Loaded packages:", self.packages.keys() )
print( "" )
print( "Creating nodes" )
# construct node graph data combining with already loaded package/asset data
for node_elm in graph_json["nodeGraph"]["nodes"]:
node_name = node_elm["name"]
interface_fullname = node_elm["interface"]
print( f"Processing {node_name}" )
re_result = re.match( re_pattern_interface_fullname, interface_fullname )
if re_result:
account_id = re_result.group(1) # FIXME : check if this matches actual account id.
package_name = re_result.group(2)
interface_name = re_result.group(3)
if account_id == "panorama":
if package_name=="abstract_rtsp_media_source":
pass
elif package_name=="hdmi_data_sink":
pass
else:
raise ValueError( f"Unsupported stock package name : {package_name}" )
else:
# FIXME : check if this matches actual account id.
pass
interface_elm = self.lookup_interface_from_package( package_name, interface_name )
interface_category = interface_elm["category"]
interface_asset_name = interface_elm["asset"]
print( "package_name:", package_name )
print( "interface_name:", interface_name )
print( "interface_category:", interface_category )
print( "interface_asset_name:", interface_asset_name )
try:
asset_elm = self.lookup_asset_from_package( package_name, interface_asset_name )
except KeyError as e:
if interface_category == "business_logic":
# In test-utility, we don't require asset for business logic. Use default information if missing.
asset_elm = {
"name": "code",
"implementations": [
{
"type": "container",
"assetUri": "",
"descriptorUri": ""
}
]
}
else:
raise
asset_implementation_elm = asset_elm["implementations"][0] # FIXME : assuming "implementations" is always length=1
asset_implementation_type = asset_implementation_elm["type"]
if interface_category=="business_logic":
if asset_implementation_type == "container":
print( "Creating BusinessLogicContainerNode:", node_name )
node = BusinessLogicContainerNode( interface_elm, asset_elm )
if self.business_logic_node:
raise ValueError( "Multiple business logic nodes are not supported" )
self.business_logic_node = node
self.nodes[ node_name ] = node
else:
raise ValueError( f"Unsupported asset type '{asset_implementation_type}' for interface category '{interface_category}'" )
elif interface_category=="ml_model":
if asset_implementation_type == "model":
print( "Creating ModelNode:", node_name )
node = ModelNode( interface_elm, asset_elm )
self.nodes[ node_name ] = node
else:
raise ValueError( f"Unsupported asset type '{asset_implementation_type}' for interface category '{interface_category}'" )
elif interface_category=="media_source":
print("asset_implementation_type:", asset_implementation_type)
if asset_implementation_type == "system":
asset_implementation_uri = asset_implementation_elm["assetUri"]
if asset_implementation_uri == "source/video/camera/rtsp/source_rtsp":
print( "Creating MediaSourceRtspCameraNode:", node_name )
node = MediaSourceRtspCameraNode( interface_elm, asset_elm )
self.nodes[ node_name ] = node
else:
raise ValueError( f"Unsupported asset uri '{asset_implementation_uri}' for asset implementation type '{asset_implementation_type}'" )
else:
raise ValueError( f"Unsupported asset type '{asset_implementation_type}' for interface category '{interface_category}'" )
elif interface_category=="data_sink":
print( "Creating HdmiDataSinkNode:", node_name )
node = HdmiDataSinkNode( interface_elm, asset_elm )
self.nodes[ node_name ] = node
else:
raise ValueError( f"Unknown interface category '{interface_category}'" )
elif interface_fullname in ("boolean", "float32", "int32", "string"):
print( "Creating ParameterNode:", node_name )
node = ParameterNode( node_elm )
self.nodes[ node_name ] = node
else:
raise ValueError( f"Interface name didn't match the expected pattern : {interface_fullname}" )
print( "Created nodes:", self.nodes.keys() )
print( "" )
print( "Connecting edges" )
# connect nodes using interfaces and edges
for edge_elm in graph_json["nodeGraph"]["edges"]:
print( "Resolving edge:", edge_elm )
edge_producer = edge_elm["producer"]
edge_consumer = edge_elm["consumer"]
re_result = re.match( re_pattern_edge_fullname, edge_producer )
if re_result:
edge_producer_node_name = re_result.group(1)
edge_producer_output_name = re_result.group(2)
else:
re_result = re.match( re_pattern_edge_parameter_node_name, edge_producer )
if re_result:
edge_producer_node_name = re_result.group(1)
edge_producer_output_name = None
else:
raise ValueError( f"Edge name didn't match the expected pattern : {edge_producer}" )
re_result = re.match( re_pattern_edge_fullname, edge_consumer )
if re_result:
edge_consumer_node_name = re_result.group(1)
edge_consumer_input_name = re_result.group(2)
else:
raise ValueError( f"Edge name didn't match the expected pattern : {edge_consumer}" )
producer_node = self.nodes[edge_producer_node_name]
consumer_node = self.nodes[edge_consumer_node_name]
if isinstance( consumer_node, BusinessLogicContainerNode ):
consumer_node.connect_producer( edge_consumer_input_name, producer_node, edge_producer_output_name )
elif isinstance( producer_node, BusinessLogicContainerNode ):
producer_node.connect_consumer( edge_producer_output_name, consumer_node, edge_consumer_input_name )
print( "Inputs/Outputs of business logic container:" )
print( "Inputs:", self.business_logic_node.inputs )
print( "Outputs:", self.business_logic_node.outputs )
def load_package_from_json( self, account_id, package_name, package_version ):
package_dir = os.path.join( self.app_dir_top, "packages", f"{account_id}-{package_name}-{package_version}" )
package_filepath = os.path.join( package_dir, "package.json" )
print( "Loading package:", package_filepath )
package = JsonPackage( package_filepath )
#package.dump()
# The name field in package.json is optional; if present, check that it matches graph.json.
if "name" in package.d["nodePackage"]:
package_name_in_package = package.d["nodePackage"]["name"]
if package_name_in_package != package_name:
raise ValueError( f"Package name doesn't match : {package_name} != {package_name_in_package}" )
# The version field in package.json is optional; if present, check that it matches graph.json.
if "version" in package.d["nodePackage"]:
package_version_in_package = package.d["nodePackage"]["version"]
if package_version_in_package != package_version:
raise ValueError( f"Package version doesn't match : {package_version} != {package_version_in_package}" )
self.packages[package_name] = package
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Instalog plugin base.
Defines plugin classes (buffer, input, output), and a PluginAPI interface for
plugins to access.
"""
import inspect
import logging
import os
import sys
import time
from cros.factory.instalog import log_utils
from cros.factory.instalog.utils import arg_utils
from cros.factory.instalog.utils import time_utils
class LoadPluginError(Exception):
"""The plugin encountered an error while loading."""
class WaitException(Exception):
"""The plugin currently cannot perform the requested operation."""
class UnexpectedAccess(Exception):
"""The plugin is accessing data when it should be stopped."""
class StateCommandError(Exception):
"""A state command on the plugin sandbox could not be run."""
class EventStreamExpired(Exception):
"""The event stream in question is expired and can no longer be used."""
class PluginCallError(Exception):
"""An error occurred when calling a method on the plugin instance."""
class ConfigError(Exception):
"""An error occurred when loading the config file."""
class PluginAPI:
"""Defines an interface for plugins to call."""
def SaveStore(self, plugin):
"""See Plugin.SaveStore."""
raise NotImplementedError
def GetDataDir(self, plugin):
"""See Plugin.GetDataDir."""
raise NotImplementedError
def IsStopping(self, plugin):
"""See Plugin.IsStopping."""
raise NotImplementedError
def IsFlushing(self, plugin):
"""See Plugin.IsStopping."""
raise NotImplementedError
def Emit(self, plugin, events):
"""See InputPlugin.Emit."""
raise NotImplementedError
def NewStream(self, plugin):
"""See OutputPlugin.NewStream."""
raise NotImplementedError
def EventStreamNext(self, plugin, plugin_stream, timeout):
"""See BufferEventStream.Next."""
raise NotImplementedError
def EventStreamCommit(self, plugin, plugin_stream):
"""See BufferEventStream.Commit."""
raise NotImplementedError
def EventStreamAbort(self, plugin, plugin_stream):
"""See BufferEventStream.Abort."""
raise NotImplementedError
class Plugin(log_utils.LoggerMixin):
"""Base class for a buffer plugin, input plugin, or output plugin in Instalog.
This is a base class for BufferPlugin, InputPlugin and OutputPlugin. Plugins
should subclass from these three classes.
This base class processes plugin arguments set through the ARGS variable, and
sets some shortcut functions to the logger.
"""
def __init__(self, config, logger_name, store, plugin_api):
"""Plugin constructor.
Args:
config: A dictionary representing arguments for this plugin. Will be
validated against the specification in ARGS.
logger_name: The name of the logger for this plugin instance.
store: A reference to the plugin's store dictionary.
plugin_api: An instance of a class implementing PluginAPI.
Raises:
arg_utils.ArgError if the arguments fail to validate.
"""
# Try parsing the arguments according to the spec in ARGS.
arg_spec = getattr(self, 'ARGS', [])
self.args = arg_utils.Args(*arg_spec).Parse(config)
# log_utils.LoggerMixin creates shortcut functions for convenience.
self.logger = logging.getLogger(logger_name)
# Plugin data store dictionary.
self.store = store
# Save the core API to a private instance variable.
self._plugin_api = plugin_api
def SetUp(self):
"""Sets up any connections or threads needed.
This function should return to the caller after the plugin has been
initialized.
"""
return
def Main(self):
"""Main thread of the plugin, started by Instalog.
Should regularly check self.IsStopping(). In the case that IsStopping()
returns True, this thread should complete execution as soon as possible.
"""
return
def TearDown(self):
"""Shuts down any extra threads and connections used by the plugin.
This function should only return to the caller after all threads and
extra processes used by the plugin have stopped.
"""
return
def SaveStore(self):
"""Saves the data store dictionary to disk.
Plugins may make many updates to the store (inefficient to write on every
change), or might only want to write it to disk in certain situations to
ensure atomicity. Thus the action of saving the store is exposed for the
plugin to handle.
"""
return self._plugin_api.SaveStore(self)
def GetDataDir(self):
"""Returns the data directory of this plugin.
This directory is set aside by Instalog core for the plugin to store any
data. Its value can be expected to be consistent across plugin restarts or
Instalog restarts.
Raises:
UnexpectedAccess if the plugin instance is in some unexpected state and
is trying to access core functionality that it should not.
"""
return self._plugin_api.GetDataDir(self)
def GetNodeID(self):
"""Returns the node ID of this plugin.
Raises:
UnexpectedAccess if the plugin instance is in some unexpected state and
is trying to access core functionality that it should not.
"""
return self._plugin_api.GetNodeID(self)
def IsStopping(self):
"""Returns whether or not the plugin may continue running.
If True is returned, the plugin should continue running as usual. If False
is returned, the plugin should shut down as soon as it finishes its work.
Should be checked regularly in the Main thread, as well as any other threads
started by the plugin.
Raises:
UnexpectedAccess if the plugin instance is in some unexpected state and
is trying to access core functionality that it should not.
"""
return self._plugin_api.IsStopping(self)
def IsFlushing(self):
"""Returns whether or not the plugin is flushing.
If False is returned, the plugin should continue running as usual. If True
is returned, the plugin should process any remaining data, and not wait for
further data to be included in the current "batch".
Raises:
UnexpectedAccess if the plugin instance is in some unexpected state and
is trying to access core functionality that it should not.
"""
return self._plugin_api.IsFlushing(self)
def Sleep(self, secs):
"""Suspends execution of the current thread for the given number of seconds.
When a plugin is requested to stop, it might be in the middle of a
time.sleep call. This provides an alternative sleep function, which will
return immediately when a plugin changes to the STOPPING state.
Should typically be used at the end of an iteration of a plugin's Main
while loop. For example:
while not self.IsStopping():
# ... do some work ...
self.Sleep(self.args.interval)
"""
end_time = time_utils.MonotonicTime() + secs
while (time_utils.MonotonicTime() < end_time and
(not self.IsStopping() and not self.IsFlushing())):
time.sleep(min(1, secs))
class BufferPlugin(Plugin):
"""Base class for a buffer plugin in Instalog."""
def AddConsumer(self, consumer_id):
"""Subscribes the specified consumer ID to the buffer.
Args:
consumer_id: Unique identifier of the consumer being added.
"""
raise NotImplementedError
def RemoveConsumer(self, consumer_id):
"""Unsubscribes the specified consumer ID from the buffer.
Args:
consumer_id: Unique identifier of the consumer being removed.
"""
raise NotImplementedError
def ListConsumers(self, details=0):
"""Returns information about consumers subscribed to the buffer.
Returns:
A dictionary, where keys are consumer IDs, and values are tuples
of (completed_count, total_count) representing progress through
Event processing.
"""
raise NotImplementedError
def Produce(self, events):
"""Produces events to be stored into the buffer.
Args:
events: List of Event objects to be inserted into the buffer.
Returns:
True if successful, False otherwise.
"""
raise NotImplementedError
def Consume(self, consumer_id):
"""Returns a BufferEventStream to consume events from the buffer.
Args:
consumer_id: ID of the consumer for which to create a BufferEventStream.
Returns:
A BufferEventStream object if successful, None otherwise.
"""
raise NotImplementedError
class BufferEventStream:
"""Event stream interface that a buffer needs to implement.
Objects implementing BufferEventStream should be returned when the buffer
plugin's Consume method is called.
"""
def Next(self):
"""Returns the next available Event."""
raise NotImplementedError
def Commit(self):
"""Marks this batch of Events as successfully processed.
Marks this BufferEventStream as expired.
Raises:
EventStreamExpired if this BufferEventStream is expired.
"""
raise NotImplementedError
def Abort(self):
"""Aborts processing this batch of Events.
Marks this BufferEventStream as expired. This BufferEventStream's Events
will still be returned on subsequent Next calls from other BufferEventStream
objects.
Raises:
EventStreamExpired if this BufferEventStream is expired.
"""
raise NotImplementedError
class InputPlugin(Plugin):
"""Base class for an input plugin in Instalog."""
def Emit(self, events):
"""Emits a set of Event objects to be passed to Instalog's buffer.
Args:
events: Either a single Event or a list of Event objects to be emitted.
Returns:
True on success, False on failure. In either case, the plugin is
expected to deal appropriately with retrying, or letting its source know
that a failure occurred.
Raises:
UnexpectedAccess if the plugin instance is in some unexpected state and
is trying to access core functionality that it should not.
"""
try:
return self._plugin_api.Emit(self, events)
except WaitException:
return False
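# Illustrative sketch (not part of the Instalog source): the rough shape of a
# concrete input plugin built on the base classes above. The argument spec,
# Event constructor and logging helper used below are assumptions for
# illustration only.
#
#   class HeartbeatInput(plugin_base.InputPlugin):
#     ARGS = [Arg('interval', (int, float), 'Seconds between events', default=60)]
#
#     def Main(self):
#       while not self.IsStopping():
#         if not self.Emit([datatypes.Event({'status': 'alive'})]):
#           self.warning('Emit failed; will retry next cycle')
#         self.Sleep(self.args.interval)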
class OutputPlugin(InputPlugin):
"""Base class for an output plugin in Instalog.
An output plugin may also Emit events, thus OutputPlugin inherits from
InputPlugin as its parent class.
"""
def NewStream(self):
"""Gets a new EventStream object to retrieve output events.
Returns:
An EventStream object (see datatypes module). None if we currently do not
have permission to create a new EventStream object (i.e. plugin is not in
one of the allowed states), or if the data
service
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
class Command:
def __init__(self,
needs_admin: bool = None,
help_cmd: str = None,
description: str = None,
cmd: str = None,
payload_type: Union[PayloadType, str] = None,
operator: Union[Operator, str] = None,
creation_time: str = None,
version: int = None,
is_exit: bool = None,
id: int = None,
apfell_version: int = None,
params: List[Union['CommandParameters', Dict[str, str]]] = None,
transforms: List[Union['CommandTransform', Dict[str, str]]] = None):
self._needs_admin = needs_admin
self._help_cmd = help_cmd
self._description = description
self._cmd = cmd
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
self._creation_time = creation_time
self._version = version
self._is_exit = is_exit
self._id = id
self._apfell_version = apfell_version
if params is not None and params != []:
if isinstance(params, list):
self._params = [CommandParameters(**x) if isinstance(x, Dict) else x for x in params]
else:
raise ValueError("params must be a list")
else:
self._params = None
if transforms is not None and transforms != []:
print(transforms)
if isinstance(transforms, list):
print(transforms)
self._transforms = [CommandTransform(**x) if isinstance(x, Dict) else x for x in transforms]
else:
raise ValueError("transforms must be a list")
else:
self._transforms = None
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(getattr(self, k), default=lambda o: o.to_json())
return r
def __str__(self):
return json.dumps(self.to_json())
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Command):
return (self._cmd == other.cmd and self._payload_type.ptype == other.payload_type.ptype) or (self._id is not None and other.id is not None and self._id == other.id)
return False
@property
def needs_admin(self) -> bool:
return self._needs_admin
@needs_admin.setter
def needs_admin(self, needs_admin):
self._needs_admin = needs_admin
@property
def help_cmd(self) -> str:
return self._help_cmd
@help_cmd.setter
def help_cmd(self, help_cmd):
self._help_cmd = help_cmd
@property
def description(self) -> str:
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def cmd(self) -> str:
return self._cmd
@cmd.setter
def cmd(self, cmd):
self._cmd = cmd
@property
def payload_type(self) -> PayloadType:
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type):
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
@property
def operator(self) -> Operator:
return self._operator
@operator.setter
def operator(self, operator):
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
@property
def creation_time(self) -> str:
return self._creation_time
@creation_time.setter
def creation_time(self, creation_time):
self._creation_time = creation_time
@property
def version(self) -> int:
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def is_exit(self) -> bool:
return self._is_exit
@is_exit.setter
def is_exit(self, is_exit):
self._is_exit = is_exit
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def apfell_version(self) -> int:
return self._apfell_version
@apfell_version.setter
def apfell_version(self, apfell_version):
self._apfell_version = apfell_version
@property
def params(self) -> List['CommandParameters']:
return self._params
@params.setter
def params(self, params):
if isinstance(params, list):
self._params = [CommandParameters(**x) if isinstance(x, Dict) else x for x in params]
elif params is None or params == []:
self._params = None
else:
raise ValueError("params must be a list")
@property
def transforms(self) -> List['CommandTransform']:
return self._transforms
@transforms.setter
def transforms(self, transforms):
if isinstance(transforms, list):
self._transforms = [CommandTransform(**x) if isinstance(x, Dict) else x for x in transforms]
elif transforms is None or transforms == []:
self._transforms = None
else:
raise ValueError("transforms must be a list")
class CommandParameters:
def __init__(self,
command: Union[Command, int] = None, # database ID for the corresponding command
cmd: str = None, # cmd string the command refers to (like shell)
payload_type: Union[PayloadType, str] = None,
name: str = None,
type: str = None,
hint: str = None,
choices: Union[List[str], str] = None,
required: bool = None,
operator: Union[Operator, str] = None,
id: int = None):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(id=command)
self._cmd = cmd
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
self._name = name
self._type = type
self._hint = hint
if isinstance(choices, List) or choices is None:
self._choices = choices
else:
self._choices = choices.split("\n")
self._required = required
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
self._id = id
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(getattr(self, k), default=lambda o: o.to_json())
return r
def __str__(self):
return json.dumps(self.to_json())
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, CommandParameters):
return (self._name == other.name and (self._command == other.command) or (self._cmd == other.cmd)) or (self._id is not None and other.id is not None and self._id == other.id)
return False
@property
def command(self) -> Command:
return self._command
@command.setter
def command(self, command):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(id=command)
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def type(self) -> str:
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def hint(self) -> str:
return self._hint
@hint.setter
def hint(self, hint):
self._hint = hint
@property
def choices(self) -> List[str]:
return self._choices
@choices.setter
def choices(self, choices):
if isinstance(choices, List) or choices is None:
self._choices = choices
else:
self._choices = choices.split("\n")
@property
def required(self) -> bool:
return self._required
@required.setter
def required(self, required):
self._required = required
@property
def operator(self) -> Operator:
return self._operator
@operator.setter
def operator(self, operator):
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
@property
def id(self) -> int:
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def cmd(self) -> str:
return self._cmd
@cmd.setter
def cmd(self, cmd):
self._cmd = cmd
@property
def payload_type(self) -> PayloadType:
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type):
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
class CommandTransform:
def __init__(self,
command: Union[Command, str] = None,
command_id: int = None,
payload_type: Union[PayloadType, str] = None,
name: str = None,
operator: Union[Operator, str] = None,
timestamp: str = None,
order: int = None,
parameter: str = None,
operation: Union[Operation, str] = None,
active: bool = None,
id: int = None):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(cmd=command, id=command_id)
self._command_id = command_id
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
self._name = name
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
self._timestamp = timestamp
self._order = order
self._parameter = parameter
if isinstance(operation, Operation) or operation is None:
self._operation = operation
else:
self._operation = Operation(name=operation)
self._active = active
self._id = id
def to_json(self):
r = {}
for k in vars(self):
if getattr(self, k) is not None:
try:
r[k[1:]] = getattr(self, k)
except:
r[k[1:]] = json.dumps(getattr(self, k), default=lambda o: o.to_json())
return r
def __str__(self):
return json.dumps(self.to_json())
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, CommandTransform):
return self._id == other.id
return False
@property
def command(self) -> Command:
return self._command
@command.setter
def command(self, command):
if isinstance(command, Command) or command is None:
self._command = command
else:
self._command = Command(cmd=command, id=self._command_id)
@property
def command_id(self) -> int:
return self._command_id
@command_id.setter
def command_id(self, command_id):
self._command_id = command_id
@property
def payload_type(self) -> PayloadType:
return self._payload_type
@payload_type.setter
def payload_type(self, payload_type):
if isinstance(payload_type, PayloadType) or payload_type is None:
self._payload_type = payload_type
else:
self._payload_type = PayloadType(ptype=payload_type)
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def operator(self) -> Operator:
return self._operator
@operator.setter
def operator(self, operator):
if isinstance(operator, Operator) or operator is None:
self._operator = operator
else:
self._operator = Operator(username=operator)
@property
def timestamp(self) -> str:
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
self._timestamp = timestamp
@property
def order(self) -> int:
return self._order
@order.setter
def order(self, order):
self._order = order
@property
def parameter(self) -> str:
return self._parameter
@parameter.setter
def parameter(self, parameter):
self._parameter = parameter
@property
def operation(self) -> Operation:
return self._operation
@operation.setter
def operation(self, operation):
if isinstance(operation, Operation) or operation is None:
self._operation = operation
else:
self._operation = Operation(name=operation)
@property
def active(self) -> bool:
return self._active
a rank-3 tensor (3D array) with a vector using
tensor product and tensor contraction.
Parameters
----------
T: sp.Array of dimensions n x m x k
v: sp.Array of dimensions k x 1
Returns
-------
A: sp.Array of dimensions n x m
Example
-------
>>>T = sp.Array([[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]],
[[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]])
⎡⎡1 4 7 10⎤ ⎡13 16 19 22⎤⎤
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢2 5 8 11⎥ ⎢14 17 20 23⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎣⎣3 6 9 12⎦ ⎣15 18 21 24⎦⎦
>>>v = sp.Array([1, 2, 3, 4]).reshape(4, 1)
⎡1⎤
⎢ ⎥
⎢2⎥
⎢ ⎥
⎢3⎥
⎢ ⎥
⎣4⎦
>>>tensor3_vector_product(T, v)
⎡⎡70⎤ ⎡190⎤⎤
⎢⎢ ⎥ ⎢ ⎥⎥
⎢⎢80⎥ ⎢200⎥⎥
⎢⎢ ⎥ ⎢ ⎥⎥
⎣⎣90⎦ ⎣210⎦⎦
"""
import sympy as sp
assert(T.rank() == 3)
# reshape v to a 1D vector so that the contraction result does not keep a
# trailing x 1 dimension (sympy Array.reshape returns a new array)
v = v.reshape(v.shape[0], )
p = sp.tensorproduct(T, v)
return sp.tensorcontraction(p, (2, 3))
def test_tensor_product():
T = sp.Array([[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]],
[[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]])
v = sp.Array([1, 2, 3, 4]).reshape(4, 1)
display(T, v)
display(tensor3_vector_product(T, v))
# test_tensor_product()
def draw_ellipse(ax, xc, A, scale=1.0, show_axis=False):
"""Construct an ellipse representation of a 2x2 matrix.
Parameters
----------
ax: plot axis
xc: np.array 2 x 1
center of the ellipse
A: np.array 2 x 2
scale: float (default=1.0)
scale factor of the principle axes
"""
eigen_values, eigen_vectors = np.linalg.eig(A)
idx = np.abs(eigen_values).argsort()[::-1]
eigen_values = eigen_values[idx]
eigen_vectors = eigen_vectors[:, idx]
phi = np.rad2deg(np.arctan2(eigen_vectors[1, 0], eigen_vectors[0, 0]))
ellipse = patches.Ellipse(xy=(xc[0, 0], xc[1, 0]),
width=2 * scale * eigen_values[0],
height=2 * scale * eigen_values[1],
angle=phi,
linewidth=2, fill=False)
ax.add_patch(ellipse)
# axis
if show_axis:
x_axis = np.array([[xc[0, 0], xc[1, 0]],
[xc[0, 0] + scale * np.abs(eigen_values[0]) * eigen_vectors[0, 0],
xc[1, 0] + scale * np.abs(eigen_values[0]) * eigen_vectors[1, 0]]])
y_axis = np.array([[xc[0, 0], xc[1, 0]],
[xc[0, 0] + scale * eigen_values[1] * eigen_vectors[0, 1],
xc[1, 0] + scale * eigen_values[1] * eigen_vectors[1, 1]]])
ax.plot(x_axis[:, 0], x_axis[:, 1], '-r', label='x-axis')
ax.plot(y_axis[:, 0], y_axis[:, 1], '-g', label='y-axis')
return phi, eigen_values, eigen_vectors
def test_ellipse():
fig, ax = plt.subplots()
xc = vec([0, 0])
M = mat([[2, 1], [1, 2]])
# M = mat([[-2.75032375, -11.82938331], [-11.82938331, -53.5627191]])
print(np.linalg.matrix_rank(M))
phi, l, v = draw_ellipse(ax, xc, M, 1, True)
print(phi, l, v)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.axis('equal')
ax.legend()
fig.show()
# test_ellipse()
def calculate_feasible_muscle_set(feasible_muscle_set_analysis, base_name,
t_start, t_end, dt, speed):
""" Calculates the feasible muscle space of a simulation.
Parameters
----------
feasible_muscle_set_analysis: FeasibleMuscleSetAnalysis
base_name: base name of simulation files
t_start: t start
t_end: t end
dt: time interval for reporting
speed: speed of animation
"""
print('Calculating feasible muscle set ...')
time = np.linspace(t_start, t_end, int(round(t_end / dt)) + 1, endpoint=True)
for i, t in enumerate(tqdm(time)):
visualize_feasible_muscle_set(feasible_muscle_set_analysis, t,
base_name + str(i).zfill(6), 'png')
command = 'convert -delay ' + \
str(speed * dt) + ' -loop 0 ' + base_name + \
'*.png ' + base_name + 'anim.gif'
print(command)
try:
os.system(command)
except:
print('unable to execute command')
def visualize_feasible_muscle_set(feasible_muscle_set_analysis, t,
fig_name='fig/feasible_muscle_set', format='png'):
""" Visualization of the feasible muscle space.
Parameters
----------
feasible_muscle_set_analysis: FeasibleMuscleSetAnalysis
t: time instance to evaluate the feasible
fig_name: figure name for saving
format: format (e.g. .png, .pdf, .eps)
"""
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
feasible_muscle_set_analysis.visualize_simple_muscle(t, ax)
fig.suptitle('t = ' + str(np.around(t, decimals=2)),
y=1.00, fontsize=12, fontweight='bold')
fig.tight_layout()
fig.savefig(fig_name + '.' + format, format=format, dpi=300)
fig.savefig(fig_name + '.pdf', format='pdf', dpi=300)
fig.savefig(fig_name + '.eps', format='eps', dpi=300)
def apply_generalized_force(f):
"""Applies a generalized force (f) in a manner that is consistent with Newton's
3rd law.
Parameters
----------
f: generalized force
"""
n = len(f)
tau = []
for i in range(0, n):
if i == n - 1:
tau.append(f[i])
else:
tau.append(f[i] - f[i + 1])
return tau
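# Worked example (not from the original source): for f = [1.0, 2.0, 3.0] the
# joint torques become tau = [f0 - f1, f1 - f2, f2] = [-1.0, -1.0, 3.0], i.e.
# each force's reaction is subtracted from the preceding joint (Newton's 3rd law).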
def custom_exponent(q, A, k, q_lim):
""" Sympy representation of custom exponent function.
f(q) = A e^(k (q - q_lim)) / (148.42) ** k
"""
return A * sp.exp(k * (q - q_lim)) / (148.42) ** k
def coordinate_limiting_force(q, q_low, q_up, a, b):
"""A continuous coordinate limiting force for a rotational joint.
It applies an exponential force when approaching a limit. The convention
is that positive force is generated when approaching the lower limit and
negative when approaching the upper. For a = 1, F ~= 1N at the limits.
Parameters
----------
q: generalized coordinate
q_low: lower limit
q_up: upper limit
a: force at limits
b: rate of the slope
Note: q, q_low, q_up must have the same units (e.g. rad)
"""
return custom_exponent(q_low + 5, a, b, q) - custom_exponent(q, a, b, q_up - 5)
def test_limiting_force():
"""
"""
q = np.linspace(0, np.pi / 4, 100, endpoint=True)
f = [coordinate_limiting_force(qq, 0, np.pi / 4, 1, 50) for qq in q]
plt.plot(q, np.array(f))
plt.show()
def gaussian(x, a, m, s):
"""Gaussian function.
f(x) = a e^(-(x - m)^2 / (2 s ^2))
Parameters
----------
x: x
a: peak
m: mean
s: standard deviation
For a good approximation of an impulse at t = 0.3, use the arguments [x, 1, 0.3, 0.01].
"""
return a * np.exp(- (x - m) ** 2 / (2 * s ** 2))
def test_gaussian():
"""
"""
t = np.linspace(0, 2, 200)
y = [gaussian(tt, 0.4, 0.4, 0.01) for tt in t]
plt.plot(t, y)
plt.show()
def rotate(origin, point, angle):
"""Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
R = np.asmatrix([[np.cos(angle), - np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
q = origin + R * (point - origin)
return q
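# Worked example (not from the original source): rotating the column vector
# [1, 0]^T by pi/2 about the origin gives [0, 1]^T:
# rotate(np.asmatrix([[0.], [0.]]), np.asmatrix([[1.], [0.]]), np.pi / 2)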
def sigmoid(t, t0, A, B):
"""Implementation of smooth sigmoid function.
Parameters
----------
t: time to be evaluated
t0: delay
A: magnitude
B: slope
Returns
-------
(y, y', y'')
"""
return (A * (np.tanh(B * (t - t0)) + 1) / 2,
A * B * (- np.tanh(B * (t - t0)) ** 2 + 1) / 2,
- A * B ** 2 * (- np.tanh(B * (t - t0)) ** 2 + 1) * np.tanh(B * (t - t0)))
def test_sigmoid():
"""
"""
t, A, B, t0 = sp.symbols('t A B t0')
y = A / 2 * (sp.tanh(B * (t - t0 - 1)) + 1)
yd = sp.diff(y, t)
ydd = sp.diff(yd, t)
print('\n', y, '\n', yd, '\n', ydd)
tt = np.linspace(-2, 2, 100)
yy = np.array([sigmoid(x, 0.5, 2, 5) for x in tt])
plt.plot(tt, yy)
plt.show()
def plot_corr_ellipses(data, ax=None, **kwargs):
"""For a given correlation matrix "data", plot the correlation matrix in terms
of ellipses.
Parameters
----------
data: Pandas dataframe containing the correlation of the data (df.corr())
ax: axis (e.g. fig, ax = plt.subplots(1, 1))
kwargs: keyword arguments (e.g. cmap='Greens') forwarded to EllipseCollection
https://stackoverflow.com/questions/34556180/
how-can-i-plot-a-correlation-matrix-as-a-set-of-ellipses-similar-to-the-r-open
"""
M = np.array(data)
if not M.ndim == 2:
raise ValueError('data must be a 2D array')
if ax is None:
fig, ax = plt.subplots(1, 1, subplot_kw={'aspect': 'equal'})
ax.set_xlim(-0.5, M.shape[1] - 0.5)
ax.set_ylim(-0.5, M.shape[0] - 0.5)
# xy locations of each ellipse center
xy = np.indices(M.shape)[::-1].reshape(2, -1).T
# set the relative sizes of the major/minor axes according to the strength of
# the positive/negative correlation
w = np.ones_like(M).ravel()
h = 1 - np.abs(M).ravel()
a = 45 * np.sign(M).ravel()
ec = EllipseCollection(widths=w, heights=h, angles=a, units='x', offsets=xy,
transOffset=ax.transData, array=M.ravel(), **kwargs)
ax.add_collection(ec)
# if data is a DataFrame, use the row/column names as tick labels
if isinstance(data, pd.DataFrame):
ax.set_xticks(np.arange(M.shape[1]))
ax.set_xticklabels(data.columns, rotation=90)
ax.set_yticks(np.arange(M.shape[0]))
ax.set_yticklabels(data.index)
return ec
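def test_plot_corr_ellipses():
    """Minimal usage sketch (added, not in the original source): plot the
    correlation matrix of a random DataFrame; assumes pd/np/plt are imported
    as elsewhere in this module."""
    df = pd.DataFrame(np.random.randn(100, 4), columns=list('ABCD'))
    fig, ax = plt.subplots(1, 1, subplot_kw={'aspect': 'equal'})
    ec = plot_corr_ellipses(df.corr(), ax=ax, cmap='Greens')
    fig.colorbar(ec)
    fig.show()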
def get_cmap(n, name='hsv'):
"""Returns a function that maps each index in 0, 1, ..., n-1 to a distinct RGB
color; the keyword argument name must be a standard mpl colormap name.
"""
return plt.cm.get_cmap(name, n)
def assert_if_same(A, B):
"""Assert whether two quantities (value, vector, matrix) are the same."""
assert np.isclose(
np.array(A).astype(np.float64),
np.array(B).astype(np.float64)).all() == True, 'quantities not equal'
def christoffel_symbols(M, q, i, j, k):
"""
M [n x n]: inertia mass matrix
q [n x 1]: generalized coordinates
i, j, k : the indices to be computed
"""
return sp.Rational(1, 2) * (sp.diff(M[i, j], q[k]) + sp.diff(M[i, k], q[j]) - sp.diff(M[j, k], q[i]))  # reconstructed from the standard definition of the Christoffel symbols of the first kind
in enumerate(integer_coords)]
sc = (sample_coords_minus1, sample_coords, sample_coords_plus1, sample_coords_plus2)
quaternary_codes = [quaternary(n, n_dim) for n in range(4 ** n_dim)]
sz = integer_coords[0].get_shape().as_list()
batch_coords = tf.tile(tf.reshape(tf.range(sz[0]), [sz[0]] + [1] * (len(sz) - 1)), [1] + sz[1:])
def make_sample(code):
return tf.gather_nd(params, tf.stack([batch_coords] + [sc[c][i] for i, c in enumerate(code)], -1))
samples = tf.stack([make_sample(code) for code in quaternary_codes]) # [64, n_batch, nx, ny, nz, 3]
weights = tf.stack([tf.reduce_prod(tf.gather(b_spline_weights, code), axis=0)
for code in quaternary_codes]) # [64, n_batch, nx, ny, nz, 3]
ddfs = tf.reduce_sum(weights * samples, axis=0, name='ddfs')
return tf.add(grid, ddfs, name='warped_grid_ffd'), ddfs
class SpatialTransformer(tf.keras.layers.Layer):
"""
N-D Spatial Transformer Tensorflow / Keras Layer
The Layer can handle both affine and dense transforms.
Both transforms are meant to give a 'shift' from the current position.
Therefore, a dense transform gives displacements (not absolute locations) at each voxel,
and an affine transform gives the *difference* of the affine matrix from
the identity matrix.
If you find this function useful, please cite:
Unsupervised Learning for Fast Probabilistic Diffeomorphic Registration
<NAME>, <NAME>, <NAME>, <NAME>
MICCAI 2018.
Originally, this code was based on voxelmorph code, which
was in turn transformed to be dense with the help of (affine) STN code
via https://github.com/kevinzakka/spatial-transformer-network
Since then, we've re-written the code to be generalized to any
dimensions, and along the way wrote grid and interpolation functions
ToDo:
The sampling coordinates in this version are defined in the atlas space.
Need to modify such that the sampling coordinates are defined in the target space.
"""
def __init__(self,
interp_method='linear',
indexing='ij',
single_transform=False,
**kwargs):
"""
Parameters:
interp_method: 'linear' or 'nearest'
single_transform: whether a single transform supplied for the whole batch
indexing (default: 'ij'): 'ij' (matrix) or 'xy' (cartesian)
'xy' indexing will have the first two entries of the flow
(along last axis) flipped compared to 'ij' indexing
"""
self.interp_method = interp_method
self.ndims = None
self.inshape = None
self.single_transform = single_transform
assert indexing in ['ij', 'xy'], "indexing has to be 'ij' (matrix) or 'xy' (cartesian)"
self.indexing = indexing
super(self.__class__, self).__init__(**kwargs)
def build(self, input_shape):
"""
input_shape should be a list for two inputs:
input1: image.
input2: transform Tensor
if affine:
should be an N x (N+1) matrix
*or* an N*(N+1) tensor (which will be reshaped to N x (N+1), with an identity row added)
if not affine:
should be a *vol_shape x N
"""
if len(input_shape) > 2:
raise Exception('Spatial Transformer must be called on a list of length 2. '
'First argument is the image, second is the transform.')
# set up number of dimensions
self.ndims = len(input_shape[0]) - 2
self.inshape = input_shape
vol_shape = input_shape[0][1:-1]
trf_shape = input_shape[1][1:]
# the transform is an affine iff:
# it's a 1D Tensor [dense transforms need to be at least ndims + 1]
# it's a 2D Tensor and shape == [N+1, N+1].
# [dense with N=1, which is the only one that could have a transform shape of 2, would be of size Mx1]
self.is_affine = len(trf_shape) == 1 or \
(len(trf_shape) == 2 and all([f == (self.ndims + 1) for f in trf_shape]))
# check sizes
if self.is_affine and len(trf_shape) == 1:
ex = self.ndims * (self.ndims + 1)
if trf_shape[0] != ex:
raise Exception('Expected flattened affine of len %d but got %d'
% (ex, trf_shape[0]))
if not self.is_affine:
if trf_shape[-1] != self.ndims:
raise Exception('Offset flow field size expected: %d, found: %d'
% (self.ndims, trf_shape[-1]))
# confirm built
self.built = True
def call(self, inputs):
"""
Parameters
inputs: list with two entries
"""
# check shapes
assert len(inputs) == 2, "inputs has to be len 2, found: %d" % len(inputs)
vol = inputs[0]
trf = inputs[1]
# necessary for multi_gpu models...
vol = tf.reshape(vol, [-1, *self.inshape[0][1:]])
trf = tf.reshape(trf, [-1, *self.inshape[1][1:]])
# go from affine
if self.is_affine:
trf = tf.map_fn(lambda x: self._single_aff_to_shift(x, vol.shape[1:-1]), trf, dtype=tf.float32)
# prepare location shift
if self.indexing == 'xy': # shift the first two dimensions
trf_split = tf.split(trf, trf.shape[-1], axis=-1)
trf_lst = [trf_split[1], trf_split[0], *trf_split[2:]]
trf = tf.concat(trf_lst, -1)
# map transform across batch
if self.single_transform:
fn = lambda x: self._single_transform([x, trf[0, :]])
return tf.map_fn(fn, vol, dtype=tf.float32)
else:
return tf.map_fn(self._single_transform, [vol, trf], dtype=tf.float32)
def _single_aff_to_shift(self, trf, volshape):
if len(trf.shape) == 1: # go from vector to matrix
trf = tf.reshape(trf, [self.ndims, self.ndims + 1])
# note this is unnecessarily extra graph since at every batch entry we have a tf.eye graph
# trf += tf.eye(self.ndims + 1)[:self.ndims, :] # add identity, hence affine is a shift from identity
return affine_to_shift(trf, volshape, shift_center=True)
def _single_transform(self, inputs):
return transform(inputs[0], inputs[1], interp_method=self.interp_method)
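# Hedged usage sketch (added, not from the original source): wiring the layer
# into a Keras functional model with a dense displacement field.  The volume
# shape below is only an example.
#
#   moving = tf.keras.Input(shape=(160, 192, 224, 1))
#   flow = tf.keras.Input(shape=(160, 192, 224, 3))   # one offset per voxel
#   warped = SpatialTransformer(interp_method='linear',
#                               indexing='ij')([moving, flow])
#   model = tf.keras.Model(inputs=[moving, flow], outputs=warped)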
class Resize(tf.keras.layers.Layer):
"""
N-D Resize Tensorflow / Keras Layer
Note: this is not re-shaping an existing volume, but resizing, like scipy's "Zoom"
If you find this function useful, please cite:
Anatomical Priors in Convolutional Networks for Unsupervised Biomedical Segmentation, Dalca AV, <NAME>, Sabuncu MR
CVPR 2018
Since then, we've re-written the code to be generalized to any
dimensions, and along the way wrote grid and interpolation functions
"""
def __init__(self,
zoom_factor,
interp_method='linear',
**kwargs):
"""
Parameters:
zoom_factor: scaling factor applied to each spatial dimension
interp_method: 'linear' or 'nearest'
"""
self.zoom_factor = zoom_factor
self.interp_method = interp_method
self.ndims = None
self.inshape = None
super(Resize, self).__init__(**kwargs)
def build(self, input_shape):
"""
input_shape should be an element of list of one inputs:
input1: volume
should be a *vol_shape x N
"""
if isinstance(input_shape[0], (list, tuple)) and len(input_shape) > 1:
raise Exception('Resize must be called on a list of length 1, '
'containing only the volume to resize.')
if isinstance(input_shape[0], (list, tuple)):
input_shape = input_shape[0]
# set up number of dimensions
self.ndims = len(input_shape) - 2
self.inshape = input_shape
# confirm built
self.built = True
def call(self, inputs):
"""
Parameters
inputs: volume of list with one volume
"""
# check shapes
if isinstance(inputs, (list, tuple)):
assert len(inputs) == 1, "inputs has to be len 1. found: %d" % len(inputs)
vol = inputs[0]
else:
vol = inputs
# necessary for multi_gpu models...
vol = tf.reshape(vol, [-1, *self.inshape[1:]])
# map transform across batch
return tf.map_fn(self._single_resize, vol, dtype=tf.float32)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
output_shape += [int(f * self.zoom_factor) for f in input_shape[1:-1]]
output_shape += [input_shape[-1]]
return tuple(output_shape)
def _single_resize(self, inputs):
return resize(inputs, self.zoom_factor, interp_method=self.interp_method)
#######################################################################
# Helper functions
#######################################################################
def b_spline(i, u):
with tf.name_scope('b_spline'):
if i == -1:
return (1 - u) ** 3 / 6
elif i == 0:
return (3 * u ** 3 - 6 * u ** 2 + 4) / 6
elif i == 1:
return (-3 * u ** 3 + 3 * u ** 2 + 3 * u + 1) / 6
elif i == 2:
return u ** 3 / 6
def quaternary(n, rank):
nums = []
while n:
n, r = divmod(n, 4)
nums.append(r)
nums += [0] * (rank - len(nums))
return list(reversed(nums))
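# Added notes (not in the original source):
# - b_spline(i, u) for i in (-1, 0, 1, 2) are the four cubic B-spline basis
#   functions; for any u in [0, 1] they sum to 1, so the FFD interpolation
#   weights above form a partition of unity.
# - quaternary(n, rank) is the base-4 expansion of n padded to `rank` digits,
#   e.g. quaternary(27, 3) == [1, 2, 3] because 27 = 1*16 + 2*4 + 3; it is used
#   above to enumerate the 4**n_dim control-point offsets per sample.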
#######################################################################
# random affine data augmentation
#######################################################################
def random_affine_matrix(rot_std=np.pi / 12, scl_std=0.1, tra_std=0., she_std=0.1, name='random_affine_params'):
"""
Generate a random affine transformation matrix.
:param rot_std: standard deviation of rotation parameters
:param scl_std: standard deviation of scaling parameters
:param tra_std: standard deviation of translation parameters
:param she_std: standard deviation of shearing parameters
:return: a tensor of shape [1, 12], composed of affine transformation parameters
"""
ax, ay, az = np.random.normal(0, rot_std, 3)
sx, sy, sz = np.random.normal(1, scl_std, 3)
p, q, r = np.random.normal(0, tra_std, 3)
hxy, hxz, hyx, hyz, hzx, hzy = np.random.normal(0, she_std, 6)
# Translation matrix
Tr = np.asarray([[1, 0, 0, p],
[0, 1, 0, q],
[0, 0, 1, r],
[0, 0, 0, 1]], dtype=np.float32)
# Scaling matrix
Sc = np.asarray([[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]], dtype=np.float32)
# Shear matrix
Sh = np.asarray([[1, hxy, hxz, 0],
[hyx, 1, hyz, 0],
[hzx, hzy, 1, 0],
[0, 0, 0, 1]], dtype=np.float32)
# Rotation matrix about each axis
Rx = np.asarray([[1, 0, 0, 0],
[0, np.cos(ax), -np.sin(ax), 0],
[0, np.sin(ax), np.cos(ax), 0],
[0, 0, 0, 1]], dtype=np.float32)
grounding[GroundingIndex(2,0,"paragraphs of #REF")] = GroundingKey.make_table_grounding("Paragraphs")
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=True,
return_message=True)
self.assertTrue(equal, message)
class TestSpiderDev414(unittest.TestCase):
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev(self):
"""Test an entry from spider dataset
"""
split_name = "dev"
i_query = 414
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# SQL:
# SELECT name , Level_of_membership FROM visitor WHERE Level_of_membership > 4 ORDER BY age DESC
# Question: Find the name and membership level of the visitors whose membership level is higher than 4, and sort by their age from old to young.
correct_sparql_query = textwrap.dedent("""\
SELECT ?Name ?Level_of_membership
WHERE
{
?visitor arc:visitor:Level_of_membership ?Level_of_membership.
FILTER(?Level_of_membership > 4).
?visitor arc:visitor:Name ?Name.
?visitor arc:visitor:Age ?Age.
}
ORDER BY DESC(?Age)""")
qdmr = get_qdmr_from_break(split_name, i_query)
qdmr.args[-1] = ["#7", "#6", "from old to young"]
# break_program:
# SELECT['visitors']
# PROJECT['membership levels of #REF', '#1']
# COMPARATIVE['#1', '#2', 'is higher than 4']
# PROJECT['names of #REF', '#3']
# PROJECT['membership levels of #REF', '#3']
# PROJECT['ages of #REF', '#3']
# UNION['#4', '#5']
# SORT['#7', '#6', 'from old to young']
grounding = {}
grounding[GroundingIndex(0,0,"visitors")] = GroundingKey.make_table_grounding("visitor")
grounding[GroundingIndex(1,0,"membership levels of #REF")] = GroundingKey.make_column_grounding("visitor", "Level_of_membership")
grounding[GroundingIndex(2,2,"is higher than 4")] = GroundingKey.make_comparative_grounding(">", "4")
grounding[GroundingIndex(3,0,"names of #REF")] = GroundingKey.make_column_grounding("visitor", "Name")
grounding[GroundingIndex(4,0,"membership levels of #REF")] = GroundingKey.make_column_grounding("visitor", "Level_of_membership")
grounding[GroundingIndex(5,0,"ages of #REF")] = GroundingKey.make_column_grounding("visitor", "Age")
grounding[GroundingIndex(7,2,"from old to young")] = GroundingKey.make_sortdir_grounding(ascending=False)
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=True,
return_message=True)
self.assertTrue(equal, message)
class TestSpiderDev426(unittest.TestCase):
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev(self):
"""Test an entry from spider dataset
"""
split_name = "dev"
i_query = 426
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# SQL:
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID
# WHERE t3.open_year < 2009
# INTERSECT
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID WHERE
# t3.open_year > 2011
# Question: What is the name of the visitor who visited both a museum opened before 2009 and a museum opened after 2011?
correct_sparql_query = textwrap.dedent("""\
SELECT ?Name ?Level_of_membership
WHERE
{
?visitor arc:visitor:Level_of_membership ?Level_of_membership.
FILTER(?Level_of_membership > 4).
?visitor arc:visitor:Name ?Name.
?visitor arc:visitor:Age ?Age.
}
ORDER BY DESC(?Age)""")
qdmr = get_qdmr_from_break(split_name, i_query)
# break_program:
# #1: SELECT['museums']
# #2: FILTER['#1', 'that opened before 2009']
# #3: FILTER['#1', 'that opened after 2011']
# #4: PROJECT['the visitor of #REF', '#1']
# #5: INTERSECTION['#4', '#2', '#3']
# #6: PROJECT['name of #REF', '#5']
grounding = {}
grounding[GroundingIndex(0,0,"museums")] = GroundingKey.make_table_grounding("museum")
grounding[GroundingIndex(1,1,"that opened before 2009")] = GroundingKey.make_comparative_grounding("<", "2009", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(2,1,"that opened after 2011")] = GroundingKey.make_comparative_grounding(">", "2011", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(3,0,"the visitor of #REF")] = GroundingKey.make_table_grounding("visitor")
grounding[GroundingIndex(5,0,"name of #REF")] = GroundingKey.make_column_grounding("visitor", "Name")
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=False,
return_message=True)
self.assertTrue(equal, message)
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev_swap_args(self):
"""Test an entry from spider dataset
"""
split_name = "dev"
i_query = 426
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# SQL:
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID
# WHERE t3.open_year < 2009
# INTERSECT
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID WHERE
# t3.open_year > 2011
# Question: What is the name of the visitor who visited both a museum opened before 2009 and a museum opened after 2011?
correct_sparql_query = textwrap.dedent("""\
SELECT ?Name ?Level_of_membership
WHERE
{
?visitor arc:visitor:Level_of_membership ?Level_of_membership.
FILTER(?Level_of_membership > 4).
?visitor arc:visitor:Name ?Name.
?visitor arc:visitor:Age ?Age.
}
ORDER BY DESC(?Age)""")
qdmr = get_qdmr_from_break(split_name, i_query)
# break_program:
# #1: SELECT['museums']
# #2: FILTER['#1', 'that opened before 2009']
# #3: FILTER['#1', 'that opened after 2011']
# #4: PROJECT['the visitor of #REF', '#1']
# #5: INTERSECTION['#4', '#2', '#3']
# #6: PROJECT['name of #REF', '#5']
grounding = {}
grounding[GroundingIndex(0,0,"museums")] = GroundingKey.make_table_grounding("museum")
grounding[GroundingIndex(1,1,"that opened before 2009")] = GroundingKey.make_comparative_grounding(">", "2011", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(2,1,"that opened after 2011")] = GroundingKey.make_comparative_grounding("<", "2009", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(3,0,"the visitor of #REF")] = GroundingKey.make_table_grounding("visitor")
grounding[GroundingIndex(5,0,"name of #REF")] = GroundingKey.make_column_grounding("visitor", "Name")
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=False,
return_message=True)
self.assertTrue(equal, message)
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev_intersection_via_double_filter(self):
"""Test an entry from spider dataset
"""
split_name = "dev"
i_query = 426
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# SQL:
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID
# WHERE t3.open_year < 2009
# INTERSECT
# SELECT t1.name FROM visitor AS t1 JOIN visit AS t2 ON t1.id = t2.visitor_id JOIN museum AS t3 ON t3.Museum_ID = t2.Museum_ID WHERE
# t3.open_year > 2011
# Question: What is the name of the visitor who visited both a museum opened before 2009 and a museum opened after 2011?
correct_sparql_query = textwrap.dedent("""\
SELECT ?Name
WHERE
{
{
SELECT ?visitor
WHERE
{
{
SELECT ?visitor
WHERE
{
?visitor_ID arc:visit:visitor_ID:visitor:ID ?visitor.
?visit arc:visit:visitor_ID ?visitor_ID.
?visit arc:visit:Museum_ID ?Museum_ID.
?Museum_ID arc:visit:Museum_ID:museum:Museum_ID ?museum.
?museum arc:museum:Open_Year ?Open_Year.
FILTER(?Open_Year < "2009").
}
GROUP BY ?visitor
}
?visitor_ID_1 arc:visit:visitor_ID:visitor:ID ?visitor.
?visit_1 arc:visit:visitor_ID ?visitor_ID_1.
?visit_1 arc:visit:Museum_ID ?Museum_ID_1.
?Museum_ID_1 arc:visit:Museum_ID:museum:Museum_ID ?museum_1.
?museum_1 arc:museum:Open_Year ?Open_Year_1.
FILTER(?Open_Year_1 > "2011").
}
GROUP BY ?visitor
}
?visitor arc:visitor:Name ?Name.
}""")
qdmr = get_qdmr_from_break(split_name, i_query)
# break_program:
# #1: SELECT['visitor']
# #2: FILTER['#1', 'that visited a museum opened before 2009']
# #3: FILTER['#2', 'that visited a museum opened after 2011']
# #4: PROJECT['name of #REF', '#3']
qdmr = QdmrInstance(["select", "filter", "filter", "project"],
[["visitor"],
['#1', 'that visited a museum opened before 2009'],
['#2', 'that visited a museum opened after 2011'],
['name of #REF', '#3']
])
grounding = {}
grounding[GroundingIndex(0,0,"visitor")] = GroundingKey.make_table_grounding("visitor")
grounding[GroundingIndex(1,1,"that visited a museum opened before 2009")] = GroundingKey.make_comparative_grounding("<", "2009", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(2,1,"that visited a museum opened after 2011")] = GroundingKey.make_comparative_grounding(">", "2011", GroundingKey.make_column_grounding("museum", "Open_Year"))
grounding[GroundingIndex(3,0,"name of #REF")] = GroundingKey.make_column_grounding("visitor", "Name")
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=False,
return_message=True)
self.assertTrue(equal, message)
class TestSpiderTrain1353(unittest.TestCase):
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev(self):
"""Test an entry from spider dataset
"""
split_name = "train"
i_query = 1353
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# Question: What is the sum of budgets of the Marketing and Finance departments?
# sql_query:
# SELECT sum(budget) FROM department WHERE dept_name = 'Marketing' OR dept_name = 'Finance'
correct_sparql_query = textwrap.dedent("""\
SELECT (?budget_1 + ?budget_2 AS ?sum)
WHERE
{
?dep_1 arc:department:budget ?budget_1.
?dep_1 arc:department:dept_name ?dept_name_1.
FILTER(?dept_name_1 = key:department:dept_name:Marketing).
?dep_2 arc:department:budget ?budget_2.
?dep_2 arc:department:dept_name ?dept_name_2.
FILTER(?dept_name_2 = key:department:dept_name:Finance).
}""")
qdmr = get_qdmr_from_break(split_name, i_query)
# break_program:
# SELECT['budgets']
# FILTER['#1', 'of the Marketing department']
# FILTER['#1', 'of the Finance department']
# ARITHMETIC['sum', '#2', '#3']
grounding = {}
grounding[GroundingIndex(0,0,"budgets")] = GroundingKey.make_column_grounding("department", "budget")
# grounding looks like key:department:dept_name:Marketing because that value is a key in the RDF graph
grounding[GroundingIndex(1,1,"of the Marketing department")] = GroundingKey.make_value_grounding("department", "dept_name", "Marketing")
grounding[GroundingIndex(2,1,"of the Finance department")] = GroundingKey.make_value_grounding("department", "dept_name", "Finance")
sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
result_correct = QueryResult.execute_query_sql(sql_query, schema)
result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
equal, message = result.is_equal_to(result_correct,
require_column_order=True,
require_row_order=False,
return_message=True)
self.assertTrue(equal, message)
class TestSpiderTrain4320(unittest.TestCase):
@timeout(ONE_TEST_TIMEOUT)
def test_spider_dev(self):
"""Test an entry from spider dataset
"""
split_name = "train"
i_query = 4320
db_id = get_db_id(split_name, i_query)
rdf_graph, schema = get_graph_and_schema(split_name, db_id)
sql_query = get_sql_query(split_name, i_query)
# Question: What are the distinct grant amount for the grants where the documents were sent before '1986-08-26 20:49:27' and grant were ended after '1989-03-16 18:27:16'?
# sql_query:
# SELECT T1.grant_amount FROM Grants AS T1 JOIN Documents AS T2 ON T1.grant_id
# Repository: denisgolius/aws-syndicate
"""
Copyright 2018 EPAM Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import concurrent
import json
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor
from datetime import date, datetime
from functools import cmp_to_key
from syndicate.commons.log_helper import get_logger
from syndicate.core.build.bundle_processor import (create_deploy_output,
load_deploy_output,
load_failed_deploy_output,
load_meta_resources,
remove_deploy_output,
remove_failed_deploy_output)
from syndicate.core.build.meta_processor import resolve_meta
from syndicate.core.constants import (BUILD_META_FILE_NAME,
CLEAN_RESOURCE_TYPE_PRIORITY,
DEPLOY_RESOURCE_TYPE_PRIORITY,
LAMBDA_TYPE)
from syndicate.core.helper import exit_on_exception, prettify_json
from syndicate.core.resources import (APPLY_MAPPING, CREATE_RESOURCE,
DESCRIBE_RESOURCE, REMOVE_RESOURCE,
RESOURCE_CONFIGURATION_PROCESSORS,
RESOURCE_IDENTIFIER, UPDATE_RESOURCE)
_LOG = get_logger('syndicate.core.build.deployment_processor')
def get_dependencies(name, meta, resources_dict, resources):
""" Get dependencies from resources that needed to create them too.
:type name: str
:type meta: dict
:type resources_dict: dict
:param resources: full mapping {name: meta} used to resolve dependencies
:param resources_dict: resources that will be created {name: meta}
"""
resources_dict[name] = meta
if meta.get('dependencies'):
for dependency in meta.get('dependencies'):
dep_name = dependency['resource_name']
dep_meta = resources[dep_name]
resources_dict[dep_name] = dep_meta
if dep_meta.get('dependencies'):
get_dependencies(dep_name, dep_meta, resources_dict, resources)
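# Illustrative sketch (added; the resource names and types are hypothetical):
#
#   resources = {
#       'api': {'resource_type': 'api_gateway',
#               'dependencies': [{'resource_name': 'handler'}]},
#       'handler': {'resource_type': 'lambda',
#                   'dependencies': [{'resource_name': 'role'}]},
#       'role': {'resource_type': 'iam_role'}
#   }
#   resources_dict = {}
#   get_dependencies('api', resources['api'], resources_dict, resources)
#   # resources_dict now contains 'api', 'handler' and 'role'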
# todo implement resources sorter according to priority
def _process_resources(resources, handlers_mapping):
res_type = None
output = {}
args = []
resource_type = None
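# The loop below batches consecutive resources of the same resource_type and
# dispatches each batch to its type-specific handler from handlers_mapping;
# the resources list is expected to be pre-sorted by deployment priority.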
try:
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'name': res_name, 'meta': res_meta})
continue
elif res_type != resource_type:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args) # todo exception may be raised here
if response:
output.update(response)
del args[:]
args.append({'name': res_name, 'meta': res_meta})
resource_type = res_type
if args:
_LOG.info('Processing {0} resources ...'.format(resource_type))
func = handlers_mapping[resource_type]
response = func(args)
if response:
output.update(response)
return True, output
except Exception as e:
_LOG.exception('Error occurred while creating {0} '
'resource: {1}'.format(res_type, str(e)))
# args list always contains one item here
return False, update_failed_output(args[0]['name'], args[0]['meta'],
resource_type, output)
def update_failed_output(res_name, res_meta, resource_type, output):
describe_func = DESCRIBE_RESOURCE[resource_type]
failed_resource_output = describe_func(res_name, res_meta)
if failed_resource_output:
if isinstance(failed_resource_output, list):
for item in failed_resource_output:
output.update(item)
else:
output.update(failed_resource_output)
return output
def deploy_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=CREATE_RESOURCE)
def update_resources(resources):
return _process_resources(resources=resources,
handlers_mapping=UPDATE_RESOURCE)
def clean_resources(output):
args = []
resource_type = None
# clean all resources
for arn, config in output:
res_type = config['resource_meta']['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
args.append({'arn': arn, 'config': config})
continue
elif res_type != resource_type:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
del args[:]
args.append({'arn': arn, 'config': config})
resource_type = res_type
if args:
_LOG.info('Removing {0} resources ...'.format(resource_type))
func = REMOVE_RESOURCE[resource_type]
func(args)
# todo implement saving failed output
def continue_deploy_resources(resources, failed_output):
updated_output = {}
deploy_result = True
res_type = None
try:
args = []
resource_type = None
for res_name, res_meta in resources:
res_type = res_meta['resource_type']
if resource_type is None:
resource_type = res_type
if res_type == resource_type:
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append(
{
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
continue
elif res_type != resource_type:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
# function to update resource is not present
# move existing output for resources to new output
__move_output_content(args, failed_output, updated_output)
del args[:]
resource_output = __find_output_by_resource_name(
failed_output, res_name)
args.append({
'name': res_name,
'meta': res_meta,
'current_configurations': resource_output
})
resource_type = res_type
if args:
func = RESOURCE_CONFIGURATION_PROCESSORS.get(resource_type)
if func:
response = func(args)
if response:
updated_output.update(
json.loads(
json.dumps(response, default=_json_serial)))
else:
# function to update resource is not present
# move existing output for resources to new output
__move_output_content(args, failed_output, updated_output)
except Exception as e:
_LOG.exception('Error occurred while creating {0} resource: {1}'.format(
res_type, str(e)))
deploy_result = False
return deploy_result, updated_output
def __move_output_content(args, failed_output, updated_output):
for arg in args:
resource_output = __find_output_by_resource_name(
failed_output, arg['name'])
if resource_output:
updated_output.update(resource_output)
def __find_output_by_resource_name(output, resource_name):
found_items = {}
for k, v in output.items():
if v['resource_name'] == resource_name:
found_items[k] = v
return found_items
@exit_on_exception
def create_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None, excluded_types=None):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# validate_deployment_packages(resources)
_LOG.info('{0} file was loaded successfully'.format(BUILD_META_FILE_NAME))
# TODO make filter chain
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
_LOG.debug('Going to create: {0}'.format(prettify_json(resources)))
# sort resources with priority
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
_LOG.info('Going to deploy AWS resources')
success, output = deploy_resources(resources_list)
if success:
_LOG.info('AWS resources were deployed successfully')
# apply dynamic changes that use ARNs
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, output)
_LOG.info('Dynamic changes were applied successfully')
_LOG.info('Going to create deploy output')
output_str = json.dumps(output, default=_json_serial)
create_deploy_output(bundle_name, deploy_name, output_str, success)
_LOG.info('Deploy output for {0} was created.'.format(deploy_name))
return success
@exit_on_exception
def remove_deployment_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None, excluded_types=None):
output = load_deploy_output(bundle_name, deploy_name)
_LOG.info('Output file was loaded successfully')
# TODO make filter chain
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.debug('Resources to delete: {0}'.format(resources_list))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
# remove output from bucket
remove_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def continue_deployment_resources(deploy_name, bundle_name,
deploy_only_resources=None,
deploy_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# TODO make filter chain
if deploy_only_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k in deploy_only_resources)
if excluded_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_resources)
if deploy_only_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] in deploy_only_types)
if excluded_types:
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(resources.items())
resources_list.sort(key=cmp_to_key(_compare_deploy_resources))
success, updated_output = continue_deploy_resources(resources_list, output)
if success:
_LOG.info('AWS resources were deployed successfully')
# apply dynamic changes that use ARNs
_LOG.info('Going to apply dynamic changes')
_apply_dynamic_changes(resources, updated_output)
_LOG.info('Dynamic changes were applied successfully')
# remove failed output from bucket
remove_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Going to create deploy output')
create_deploy_output(bundle_name, deploy_name,
prettify_json(updated_output), success=success)
return success
@exit_on_exception
def remove_failed_deploy_resources(deploy_name, bundle_name,
clean_only_resources=None,
clean_only_types=None,
excluded_resources=None,
excluded_types=None):
output = load_failed_deploy_output(bundle_name, deploy_name)
_LOG.info('Failed output file was loaded successfully')
# TODO make filter chain
if clean_only_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] in clean_only_resources)
if excluded_resources:
output = dict((k, v) for (k, v) in output.items() if
v['resource_name'] not in excluded_resources)
if clean_only_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta']['resource_type'] in clean_only_types)
if excluded_types:
output = dict((k, v) for (k, v) in output.items() if
v['resource_meta'][
'resource_type'] not in excluded_types)
# sort resources with priority
resources_list = list(output.items())
resources_list.sort(key=cmp_to_key(_compare_clean_resources))
_LOG.info('Going to clean AWS resources')
clean_resources(resources_list)
# remove output from bucket
remove_failed_deploy_output(bundle_name, deploy_name)
@exit_on_exception
def update_lambdas(bundle_name,
publish_only_lambdas,
excluded_lambdas_resources):
resources = resolve_meta(load_meta_resources(bundle_name))
_LOG.debug('Names were resolved')
_LOG.debug(prettify_json(resources))
# TODO make filter chain
resources = dict((k, v) for (k, v) in resources.items() if
v['resource_type'] == LAMBDA_TYPE)
if publish_only_lambdas:
resources = dict((k, v) for (k, v) in resources.items() if
k in publish_only_lambdas)
if excluded_lambdas_resources:
resources = dict((k, v) for (k, v) in resources.items() if
k not in excluded_lambdas_resources)
_LOG.debug('Going to update the following lambdas: {0}'.format(
prettify_json(resources)))
resources = list(resources.items())
update_resources(resources=resources)
def _json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
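# Example (added, not in the original source):
#   json.dumps({'when': datetime(2018, 1, 1)}, default=_json_serial)
#   -> '{"when": "2018-01-01T00:00:00"}'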
def _apply_dynamic_changes(resources, output):
pool = ThreadPoolExecutor(max_workers=5)
futures = []
for name, meta in resources.items():
resource_type = meta['resource_type']
apply_changes = meta.get('apply_changes')
if apply_changes:
for apply_item in apply_changes:
change_type = apply_item['apply_type']
dependency_name = apply_item['dependency_name']
res_config = resources.get(dependency_name)
if not res_config:
_LOG.debug('Dependency resource {0} is not found, '
'skipping the
dst_mean_vec - dst_mean_vec.dot(normal) * normal
cos_dihedral = src_mean_projection.dot(dst_mean_projection) / (
np.linalg.norm(src_mean_projection) * np.linalg.norm(dst_mean_projection))
dihedral_angle = np.arccos(cos_dihedral)
edges.append([src_idx, dst_idx])
mask.append(1)
distances.append(np.linalg.norm(src_to_dst))
angles.append(dihedral_angle)
edges.append([dst_idx, src_idx])
distances.append(np.linalg.norm(src_to_dst))
mask.append(1)
angles.append(dihedral_angle)
edges = torch.tensor(edges)
graph = dgl.graph((edges[:, 0], edges[:, 1]), num_nodes=len(coords), idtype=torch.int32)
graph.ndata['feat'] = lig_atom_featurizer(lig)
graph.ndata['weights'] = torch.from_numpy(np.array(weights).astype(np.float32))
graph.edata['feat'] = distance_featurizer(distances, 0.75) # avg distance = 1.3 So divisor = (4/7)*1.3 = ~0.75
graph.ndata['x'] = torch.from_numpy(np.array(coords).astype(np.float32))
return graph, torch.tensor(mask, dtype=bool), torch.tensor(angles, dtype=torch.float32)
def get_geometry_graph(lig):
coords = lig.GetConformer().GetPositions()
edges_src = []
edges_dst = []
for i, atom in enumerate(lig.GetAtoms()):
src_idx = atom.GetIdx()
assert src_idx == i
one_hop_dsts = [neighbor for neighbor in list(atom.GetNeighbors())]
two_and_one_hop_idx = [neighbor.GetIdx() for neighbor in one_hop_dsts]
for one_hop_dst in one_hop_dsts:
for two_hop_dst in one_hop_dst.GetNeighbors():
two_and_one_hop_idx.append(two_hop_dst.GetIdx())
all_dst_idx = list(set(two_and_one_hop_idx))
all_dst_idx.remove(src_idx)
all_src_idx = [src_idx] *len(all_dst_idx)
edges_src.extend(all_src_idx)
edges_dst.extend(all_dst_idx)
graph = dgl.graph((torch.tensor(edges_src), torch.tensor(edges_dst)), num_nodes=lig.GetNumAtoms(), idtype=torch.long)
graph.edata['feat'] = torch.from_numpy(np.linalg.norm(coords[edges_src] - coords[edges_dst], axis=1).astype(np.float32))
return graph
def isRingAromatic(mol, bondRing):
for id in bondRing:
if not mol.GetBondWithIdx(id).GetIsAromatic():
return False
return True
def get_geometry_graph_ring(lig):
coords = lig.GetConformer().GetPositions()
rings = lig.GetRingInfo().AtomRings()
bond_rings = lig.GetRingInfo().BondRings()
edges_src = []
edges_dst = []
for i, atom in enumerate(lig.GetAtoms()):
src_idx = atom.GetIdx()
assert src_idx == i
one_hop_dsts = [neighbor for neighbor in list(atom.GetNeighbors())]
two_and_one_hop_idx = [neighbor.GetIdx() for neighbor in one_hop_dsts]
for one_hop_dst in one_hop_dsts:
for two_hop_dst in one_hop_dst.GetNeighbors():
two_and_one_hop_idx.append(two_hop_dst.GetIdx())
all_dst_idx = list(set(two_and_one_hop_idx))
for ring_idx, ring in enumerate(rings):
if src_idx in ring and isRingAromatic(lig,bond_rings[ring_idx]):
all_dst_idx.extend(list(ring))
all_dst_idx = list(set(all_dst_idx))
all_dst_idx.remove(src_idx)
all_src_idx = [src_idx] *len(all_dst_idx)
edges_src.extend(all_src_idx)
edges_dst.extend(all_dst_idx)
graph = dgl.graph((torch.tensor(edges_src), torch.tensor(edges_dst)), num_nodes=lig.GetNumAtoms(), idtype=torch.long)
graph.edata['feat'] = torch.from_numpy(np.linalg.norm(coords[edges_src] - coords[edges_dst], axis=1).astype(np.float32))
return graph
def get_lig_graph_multiple_conformer(mol, name, radius=20, max_neighbors=None, use_rdkit_coords=False, num_confs=10):
conf = mol.GetConformer()
true_lig_coords = conf.GetPositions()
try:
count = 0
success = False
while not success:
try:
all_lig_coords = get_multiple_rdkit_coords_individual(mol,num_conf=num_confs)
success = True
except Exception as e:
print(f'failed RDKit coordinate generation. Trying the {count}th time.')
if count > 5:
raise Exception(e)
count +=1
except Exception as e:
all_lig_coords = [true_lig_coords] * num_confs
with open('temp_create_dataset_rdkit.log', 'a') as f:
f.write('Generating RDKit conformer failed for \n')
f.write(name)
f.write('\n')
f.write(str(e))
f.write('\n')
f.flush()
print('Generating RDKit conformer failed for ')
print(name)
print(str(e))
lig_graphs = []
for i in range(num_confs):
R, t = rigid_transform_Kabsch_3D(all_lig_coords[i].T, true_lig_coords.T)
lig_coords = ((R @ (all_lig_coords[i]).T).T + t.squeeze())
log('kabsch RMSD between rdkit ligand and true ligand is ',
np.sqrt(np.sum((lig_coords - true_lig_coords) ** 2, axis=1).mean()).item())
num_nodes = lig_coords.shape[0]
assert lig_coords.shape[1] == 3
distance = spa.distance.cdist(lig_coords, lig_coords)
src_list = []
dst_list = []
dist_list = []
mean_norm_list = []
for i in range(num_nodes):
dst = list(np.where(distance[i, :] < radius)[0])
dst.remove(i)
if max_neighbors != None and len(dst) > max_neighbors:
dst = list(np.argsort(distance[i, :]))[1: max_neighbors + 1] # closest would be self loop
if len(dst) == 0:
dst = list(np.argsort(distance[i, :]))[1:2] # closest would be the index i itself > self loop
log(
f'The lig_radius {radius} was too small for one lig atom such that it had no neighbors. So we connected {i} to the closest other lig atom {dst}')
assert i not in dst
src = [i] * len(dst)
src_list.extend(src)
dst_list.extend(dst)
valid_dist = list(distance[i, dst])
dist_list.extend(valid_dist)
valid_dist_np = distance[i, dst]
sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1) # (sigma_num, neigh_num)
assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01
diff_vecs = lig_coords[src, :] - lig_coords[dst, :] # (neigh_num, 3)
mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)
denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)
mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)
mean_norm_list.append(mean_vec_ratio_norm)
assert len(src_list) == len(dst_list)
assert len(dist_list) == len(dst_list)
graph = dgl.graph((torch.tensor(src_list), torch.tensor(dst_list)), num_nodes=num_nodes, idtype=torch.int32)
graph.ndata['feat'] = lig_atom_featurizer(mol)
graph.edata['feat'] = distance_featurizer(dist_list, 0.75) # avg distance = 1.3 So divisor = (4/7)*1.3 = ~0.75
graph.ndata['x'] = torch.from_numpy(np.array(true_lig_coords).astype(np.float32))
graph.ndata['mu_r_norm'] = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
if use_rdkit_coords:
graph.ndata['new_x'] = torch.from_numpy(np.array(lig_coords).astype(np.float32))
lig_graphs.append(graph)
return lig_graphs
def get_lig_graph_revised(mol, name, radius=20, max_neighbors=None, use_rdkit_coords=False):
conf = mol.GetConformer()
true_lig_coords = conf.GetPositions()
if use_rdkit_coords:
try:
rdkit_coords = get_rdkit_coords(mol).numpy()
R, t = rigid_transform_Kabsch_3D(rdkit_coords.T, true_lig_coords.T)
lig_coords = ((R @ (rdkit_coords).T).T + t.squeeze())
log('kabsch RMSD between rdkit ligand and true ligand is ', np.sqrt(np.sum((lig_coords - true_lig_coords) ** 2, axis=1).mean()).item())
except Exception as e:
lig_coords = true_lig_coords
with open('temp_create_dataset_rdkit_timesplit_no_lig_or_rec_overlap_train.log', 'a') as f:
f.write('Generating RDKit conformer failed for \n')
f.write(name)
f.write('\n')
f.write(str(e))
f.write('\n')
f.flush()
print('Generating RDKit conformer failed for ')
print(name)
print(str(e))
else:
lig_coords = true_lig_coords
num_nodes = lig_coords.shape[0]
assert lig_coords.shape[1] == 3
distance = spa.distance.cdist(lig_coords, lig_coords)
src_list = []
dst_list = []
dist_list = []
mean_norm_list = []
for i in range(num_nodes):
dst = list(np.where(distance[i, :] < radius)[0])
dst.remove(i)
if max_neighbors != None and len(dst) > max_neighbors:
dst = list(np.argsort(distance[i, :]))[1: max_neighbors + 1] # closest would be self loop
if len(dst) == 0:
dst = list(np.argsort(distance[i, :]))[1:2] # closest would be the index i itself > self loop
log(
f'The lig_radius {radius} was too small for one lig atom such that it had no neighbors. So we connected {i} to the closest other lig atom {dst}')
assert i not in dst
src = [i] * len(dst)
src_list.extend(src)
dst_list.extend(dst)
valid_dist = list(distance[i, dst])
dist_list.extend(valid_dist)
valid_dist_np = distance[i, dst]
sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1) # (sigma_num, neigh_num)
assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01
diff_vecs = lig_coords[src, :] - lig_coords[dst, :] # (neigh_num, 3)
mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)
denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)
mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)
mean_norm_list.append(mean_vec_ratio_norm)
assert len(src_list) == len(dst_list)
assert len(dist_list) == len(dst_list)
graph = dgl.graph((torch.tensor(src_list), torch.tensor(dst_list)), num_nodes=num_nodes, idtype=torch.int32)
graph.ndata['feat'] = lig_atom_featurizer(mol)
graph.edata['feat'] = distance_featurizer(dist_list, 0.75) # avg distance = 1.3 So divisor = (4/7)*1.3 = ~0.75
graph.ndata['x'] = torch.from_numpy(np.array(true_lig_coords).astype(np.float32))
graph.ndata['mu_r_norm'] = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
if use_rdkit_coords:
graph.ndata['new_x'] = torch.from_numpy(np.array(lig_coords).astype(np.float32))
return graph
def distance_featurizer(dist_list, divisor) -> torch.Tensor:
# you want to use a divisor that is close to 4/7 times the average distance that you want to encode
length_scale_list = [1.5 ** x for x in range(15)]
center_list = [0. for _ in range(15)]
num_edge = len(dist_list)
dist_list = np.array(dist_list)
transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))
for length_scale, center in zip(length_scale_list, center_list)]
transformed_dist = np.array(transformed_dist).T
transformed_dist = transformed_dist.reshape((num_edge, -1))
return torch.from_numpy(transformed_dist.astype(np.float32))
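# Shape note (follows from the code above): the output has one row per input
# distance and 15 columns, one radial-basis-style feature per length scale,
# e.g. distance_featurizer([1.2, 1.4], 0.75) returns a tensor of shape (2, 15).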
def get_hierarchical_graph(rec, rec_coords_list, c_alpha_coords, n_coords, c_coords, c_alpha_cutoff=20,
c_alpha_max_neighbors=None,
surface_graph_cutoff=10, surface_max_neighbors=None,
surface_mesh_cutoff=1.72):
surface_mesh = get_surface(rec, 'msms -density 1')
rec_coords_concat = np.concatenate(rec_coords_list, axis=0)
distances = spatial.distance.cdist(rec_coords_concat, surface_mesh)
# surface_indices = sorted(list(set(np.argmin(distances, axis=0)))) # use the closest atom instead
surface_indices = sorted(list(set(np.where(distances < surface_mesh_cutoff)[0])))
np_surface_indices = np.array(surface_indices)
c_alpha_to_surface_src = []
c_alpha_to_surface_dst = []
c_alpha_to_surface_distances = []
n_i_list = []
u_i_list = []
v_i_list = []
atom_count = 0
for i, res_coords in enumerate(rec_coords_list):
res_indices = np.arange(len(res_coords)) + atom_count
atom_count += len(res_coords)
# get indices where the surface atom indices of this residue appear in surface_indices (CAREFUL: for this to work, the surface_indices have to be sorted)
index_in_surface_atoms = np.where(np.isin(surface_indices, res_indices))[0]
res_surface_indices = np_surface_indices[index_in_surface_atoms]
c_alpha_to_surface_src.extend(len(index_in_surface_atoms) * [i])
c_alpha_to_surface_dst.extend(list(index_in_surface_atoms))
res_surface_coords = rec_coords_concat[res_surface_indices]
nitrogen = n_coords[i]
c_alpha = c_alpha_coords[i]
carbon = c_coords[i]
c_alpha_to_surface_distances.extend(list(np.linalg.norm((res_surface_coords - c_alpha), axis=1)))
u_i = (nitrogen - c_alpha) / np.linalg.norm(nitrogen - c_alpha)
t_i = (carbon - c_alpha) / np.linalg.norm(carbon - c_alpha)
n_i = np.cross(u_i, t_i) / np.linalg.norm(np.cross(u_i, t_i))
v_i = np.cross(n_i, u_i)
assert (math.fabs(
np.linalg.norm(v_i) - 1.) < 1e-5), "protein utils protein_to_graph_dips, v_i norm is not 1"
n_i_list.append(n_i)
u_i_list.append(u_i)
v_i_list.append(v_i)
n_i_feat = np.stack(n_i_list, axis=0)
u_i_feat = np.stack(u_i_list, axis=0)
v_i_feat = np.stack(v_i_list, axis=0)
num_residues = len(rec_coords_list)
if num_residues <= 1:
raise ValueError(f"l_or_r contains only 1 residue!")
################### Build the k-NN graph ##############################
surface_coords = rec_coords_concat[surface_indices]
surface_distances = spa.distance.cdist(surface_coords, surface_coords)
surface_src = []
surface_dst = []
surface_edge_distances = []
surface_mean_norms = []
for i in range(len(surface_coords)):
dst = list(np.where(surface_distances[i, :] < surface_graph_cutoff)[0])
dst.remove(i)
if surface_max_neighbors != None and len(dst) > surface_max_neighbors:
dst = list(np.argsort(surface_distances[i, :]))[1: surface_max_neighbors + 1] # closest would be self loop
if len(dst) == 0:
dst = list(np.argsort(surface_distances[i, :]))[1:2] # closest would be the index i itself > self loop
log(
f'The surface_graph_cutoff {surface_graph_cutoff} was too small for one surface atom such that it had no neighbors. So we connected {i} to the closest other surface_atom {dst}')
assert i not in dst
src = [i] * len(dst)
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility script for checking out subdirectories of many GIT repositories
to specified locations, like is possible with SVN and gclient. This uses a
combination of GIT, sparse-checkout, shallow-clone and filesystem junctions.
For each dependency in a 'gitdeps' file this script will checkout one
subdirectory of one repository into a specified location. The input is as
follows:
- The user specifies a local destination for the checkout.
- The user specifies a source repository.
- The user specifies a list of subdirectories of the repository to get.
- The user specifies a revision.
The checkout works as follows:
- An empty git checkout is initialized in the cache directory. This will be
in a subfolder with an essentially random name.
- The specified repository is added as a remote to that repo.
- A sparse-checkout directive is added to select only the desired
subdirectories.
- The repository is cloned using a depth of 1 (no history, only the actual
contents of the desired revision).
- The destination directories are created as junctions pointing to the
desired subdirectory of the checkout in the cache directory.
The script maintains its state in the root of the cache directory, allowing it
to reuse checkout directories when possible.
"""
import ast
import glob
import hashlib
import logging
import optparse
import os
import random
import re
import subprocess
import threading
_LOGGER = logging.getLogger(os.path.basename(__file__))
# Matches a SHA1 hash used as a git revision.
_GIT_SHA1_RE = re.compile('^[A-Fa-f0-9]{40}$')
def _ParseCommandLine():
"""Parses the command-line and returns an options structure."""
option_parser = optparse.OptionParser()
option_parser.add_option('--cache-dir', type='string',
default='.gitdeps-cache',
help='The directory to be used for storing cache files. Defaults to '
'.gitdeps-cache in the current working directory.')
option_parser.add_option('--output-dir', type='string', default='.',
help='The directory to be used as the root of all output. Defaults to '
'the current working directory.')
option_parser.add_option('--dry-run', action='store_true', default=False,
help='If true then will simply list actions that would be performed.')
option_parser.add_option('--force', action='store_true', default=False,
help='If true then will force the checkout to be completely rebuilt.')
option_parser.add_option('--verbose', dest='log_level', action='store_const',
default=logging.INFO, const=logging.DEBUG,
help='Enables verbose logging.')
option_parser.add_option('--quiet', dest='log_level', action='store_const',
default=logging.INFO, const=logging.ERROR,
help='Disables all output except for errors.')
options, args = option_parser.parse_args()
# Configure logging.
logging.basicConfig(level=options.log_level)
# Set default values.
if not args:
# Default to checking for a file in the current working directory.
_LOGGER.info('Defaulting to using GITDEPS in current working directory.')
args = ['GITDEPS']
# Validate arguments and options.
if not os.path.isdir(options.output_dir):
option_parser.error('Output directory does not exist: %s' %
options.output_dir)
for path in args:
if not os.path.exists(path):
option_parser.error('Missing dependency file: %s' % path)
# Normalize local paths for prettier output.
options.cache_dir = os.path.normpath(os.path.abspath(options.cache_dir))
options.output_dir = os.path.normpath(os.path.abspath(options.output_dir))
return options, args
class RepoOptions(object):
"""Light object used for shuttling around information about a dependency."""
def __init__(self):
self.repository = None
self.revision = None
self.output_dir = None
self.remote_dirs = []
self.deps_file = None
self.checkout_dir = None
self.recurse = False
def __str__(self):
"""Stringifies this object for debugging."""
return ('RepoOptions(repository=%s, revision=%s, output_dir=%s, '
'remote_dirs=%s, deps_file=%s, checkout_dir=%s, recurse=%s)') % (
self.repository.__repr__(),
self.revision.__repr__(),
self.output_dir.__repr__(),
self.remote_dirs.__repr__(),
self.deps_file.__repr__(),
self.checkout_dir.__repr__(),
self.recurse.__repr__())
def _ParseRepoOptions(cache_dir, root_output_dir, deps_file_path, key, value):
"""Given the |root_output_dir| specified on the command line, a |key| and
|value| pair from a GITDEPS file, and the path of the deps file, generates
a corresponding RepoOptions object. The |key| is the output path of the
checkout relative to |root_output_dir|, and |value| consists of a
(repository URL, remote directory, revision hash) tuple. This can raise an
Exception on failure.
"""
bad = False
if ((type(value) != list and type(value) != tuple) or len(value) < 3 or
len(value) > 4 or (type(value[1]) != list and type(value[1]) != tuple)):
bad = True
if len(value) == 4 and type(value[3]) != dict:
bad = True
if bad:
_LOGGER.error('Invalid dependency tuple: %s', value)
raise Exception()
# Always use lowercase SHA1 hashes for consistency.
refspec = value[2]
if _GIT_SHA1_RE.match(refspec):
refspec = refspec.lower()
repo_options = RepoOptions()
repo_options.output_dir = os.path.normpath(os.path.abspath(os.path.join(
root_output_dir, key)))
repo_options.repository = value[0]
repo_options.remote_dirs = value[1]
repo_options.revision = refspec
repo_options.deps_file = deps_file_path
# Parse additional options.
if len(value) > 3:
repo_options.recurse = value[3].get('recurse', False) == True
# Create a unique name for the checkout in the cache directory. Make the
# output directory relative to the cache directory so that they can be
# moved around together.
output_dir_rel = os.path.relpath(repo_options.output_dir,
root_output_dir).lower()
if output_dir_rel.startswith('..'):
raise Exception('Invalid output directory: %s' % key)
n = hashlib.md5(output_dir_rel).hexdigest()
repo_options.checkout_dir = os.path.abspath(os.path.join(cache_dir, n, 'src'))
return repo_options
def _EnsureDirectoryExists(path, comment_name, dry_run):
"""Ensures that the given |path| exists. Only actually creates the directory
if |dry_run| is False. |comment_name| is used during logging of this
operation.
"""
if comment_name:
comment_name += ' '
else:
comment_name = ''
if not os.path.exists(path):
_LOGGER.debug('Creating %sdirectory: %s', comment_name, path)
if not dry_run:
os.makedirs(path)
def _GetCasedFilename(filename):
"""Returns the full case-sensitive filename for the given |filename|. If the
path does not exist, returns the original |filename| as is.
"""
pattern = '%s[%s]' % (filename[:-1], filename[-1])
filenames = glob.glob(pattern)
if not filenames:
return filename
return filenames[0]
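# Example (only meaningful on a case-insensitive filesystem such as Windows,
# where glob reports the on-disk casing; the paths are hypothetical):
#   _GetCasedFilename('c:\\windows') -> 'C:\\Windows'
#   _GetCasedFilename('missing_path') -> 'missing_path'  # non-existent: unchanged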
def _Shell(*cmd, **kw):
"""Runs |cmd|, returns the results from Popen(cmd).communicate(). Additional
keyword arguments are passed on to subprocess.Popen. If |stdout| and |stderr|
are not specified, they default to subprocess.PIPE. If |dry_run| is not
specified it defaults to True. The command is only actually run if |dry_run|
is False. This can raise a RuntimeError on failure.
"""
if 'cwd' in kw:
_LOGGER.debug('Executing %s in "%s".', cmd, kw['cwd'])
else:
_LOGGER.debug('Executing %s.', cmd)
if kw.get('dry_run', True):
return ('', '')
kw.pop('dry_run', None)
dump_on_error = kw.pop('dump_on_error', False)
kw['shell'] = True
kw.setdefault('stdout', subprocess.PIPE)
kw.setdefault('stderr', subprocess.PIPE)
prog = subprocess.Popen(cmd, **kw)
stdout, stderr = prog.communicate()
if prog.returncode != 0:
if dump_on_error:
print(stdout)
print(stderr)
raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
return (stdout, stderr)
def _IsGitCheckoutRoot(path):
"""Return true if the given |path| is the root of a git checkout."""
return os.path.exists(os.path.join(path, '.git'))
# Matches a GIT config file section header, and grabs the name of the section
# in the first group. Used by _GetGitOrigin.
_GIT_CONFIG_SECTION_RE = re.compile(r'^\s*\[(.*?)\]\s*$')
# Matches the URL line from a 'remote' section of a GIT config. Used by
# _GetGitOrigin.
_GIT_CONFIG_REMOTE_URL_RE = re.compile(r'^\s*url\s*=\s*(.*?)\s*$')
def _GetGitOrigin(path):
"""Returns the URL of the 'origin' remote for the git repo in |path|. Returns
None if the 'origin' remote doesn't exist. Raises an IOError if |path| doesn't
exist or is not a git repo.
"""
section = None
for line in open(os.path.join(path, '.git', 'config'), 'rb'):
m = _GIT_CONFIG_SECTION_RE.match(line)
if m:
section = m.group(1)
continue
# We only care about the 'origin' configuration.
if section != 'remote "origin"':
continue
m = _GIT_CONFIG_REMOTE_URL_RE.match(line)
if m:
return m.group(1).strip()
return None
def _GetGitHead(path):
"""Returns the hash of the head of the git repo in |path|. Raises an IOError
if |path| doesn't exist or is not a git repo.
"""
return open(os.path.join(path, '.git', 'HEAD'), 'rb').read().strip()
def _NormalizeGitPath(path):
"""Given a |path| in a GIT repository (relative to its root), normalizes it so
it will match only that exact path in a sparse checkout.
"""
path = path.strip()
if not path.startswith('/'):
path = '/' + path
if not path.endswith('/'):
path += '/'
return path
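# Worked examples (follow directly from the normalization rules above):
#   _NormalizeGitPath('syzygy/build')      -> '/syzygy/build/'
#   _NormalizeGitPath('/already/rooted/')  -> '/already/rooted/'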
def _RenameCheckout(path, dry_run):
"""Renames the checkout in |path| so that it can be subsequently deleted.
Only actually does the work if |dry_run| is False. Returns the path of the
renamed checkout directory. Raises an Exception on failure.
"""
def _RenameCheckoutImpl(path, dry_run):
if dry_run:
return path + '-old-dryrun'
attempts = 0
while attempts < 10:
newpath = '%s-old-%04d' % (path, random.randint(0, 999))
try:
os.rename(path, newpath)
return newpath
except WindowsError:
attempts += 1
raise Exception('Unable to rename checkout directory: %s' % path)
newpath = _RenameCheckoutImpl(path, dry_run)
_LOGGER.debug('Renamed checkout directory: %s', newpath)
return newpath
def _DeleteCheckout(path, dry_run):
"""Deletes the checkout in |path|. Only actually deletes the checkout if
# stacking.py
# module: vespy.stacking
# Functions for applying various stacking methods to seismic data
from vespy.utils import get_station_coordinates
import numpy as np
import scipy.signal as sig
import cmath
def degrees_to_radians(theta):
return theta * np.pi / 180
def resolve_slowness_vector(s, baz):
'''
Resolves a scalar slowness and backazimuth into the x and y components of the two-dimensional slowness vector.
Parameters
----------
s : float
Magnitude of slowness vector, in s / km
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
Returns
-------
(s_x, s_y) : tuple
Tuple containing the magnitude of the x and y components of the 2d slowness vector, in s / km.
'''
baz_rad = np.deg2rad(baz)
s_x = s * np.sin(baz_rad)
s_y = s * np.cos(baz_rad)
return s_x, s_y
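# Worked example (values follow from the formulas above): for s = 0.08 s/km and
# baz = 90 degrees (an arrival from due east), s_x = 0.08 * sin(90) = 0.08 and
# s_y = 0.08 * cos(90) = 0.0, so the slowness vector points along x:
#
#   s_x, s_y = resolve_slowness_vector(0.08, 90.0)   # -> (0.08, ~0.0)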
def get_shifts(st, s, baz):
'''
Calculates the shifts (as an integer number of samples in the time series) for every station in a stream of time series seismograms for a slowness vector of given magnitude and backazimuth.
The shift is that which needs to be applied in order to align an arrival (arriving with slowness s and backazimuth baz) with the same arrival at the array reference point (the location of the station that makes up the first trace in the stream).
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
s : float
Magnitude of slowness vector, in s / km
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
Returns
-------
shifts : list
List of integer delays at each station in the array, also length K
'''
theta = [] # Angular position of each station, measured clockwise from North
r = [] # Distance of each station
# First station is reference point, so has zero position vector
theta.append(0.0)
r.append(0.0)
geometry = get_station_coordinates(st)/1000. # in km
# For each station, get distance from array reference point (first station), and the angular displacement clockwise from north
for station in geometry[1:]:
r_x = station[0] # x-component of position vector
r_y = station[1] # y-component of position vector
# theta is angle c/w from North to position vector of station; need to compute differently for each quadrant
if r_x == 0 and r_y == 0:
theta.append(0.0)
elif r_x > 0 and r_y == 0:
theta.append(90.0)
elif r_x < 0 and r_y == 0:
theta.append(270.0)
elif r_x >= 0 and r_y > 0:
theta.append(np.degrees(np.arctan(r_x/r_y)))
elif r_x >= 0 and r_y < 0:
theta.append(180.0 + np.degrees(np.arctan(r_x/r_y)))
elif r_x < 0 and r_y < 0:
theta.append(180.0 + np.degrees(np.arctan(r_x/r_y)))
else:
theta.append(360.0 + np.degrees(np.arctan(r_x/r_y)))
r.append(np.sqrt(r_x**2 + r_y**2))
# Find angle between station position vector and slowness vector in order to compute dot product
# Angle between slowness and position vectors, measured clockwise
phi = [180 - baz + th for th in theta]
sampling_rate = st[0].stats.sampling_rate
shifts = []
# Shift is dot product. The minus sign is because a positive time delay needs to be corrected by a negative shift in order to stack
for i in range(0, len(st)):
shifts.append(-1 * int(round(r[i] * s * np.cos(np.radians(phi[i])) * sampling_rate)))
return shifts
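# Usage sketch (hedged; `st` is assumed to be an ObsPy Stream of SAC traces for
# one array, with coordinates readable by get_station_coordinates):
#
#   # from obspy import read
#   # st = read('my_array_event_*.sac')            # hypothetical file pattern
#   # shifts = get_shifts(st, s=0.08, baz=45.0)    # one integer shift per trace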
def get_shifts_3d(st, s, theta, baz):
'''
Calculates the shifts (as an integer number of samples in the time series) for every station in a stream of time series seismograms for a slowness vector of given magnitude and backazimuth.
Takes account of the full 3d slowness vector in order to factor in station elevations.
The shift is that which needs to be applied in order to align an arrival (arriving with slowness s and backazimuth baz) with the same arrival at the array reference point (the location of the station that makes up the first trace in the stream).
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
s : float
Horizontal slowness in s / km
theta : float
Angle of incidence in degrees
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
Returns
-------
shifts : list
List of integer delays at each station in the array, also length K
'''
shifts = []
r = [] # Displacement of each station from centre
# First station is reference point, so has zero position vector
#r.append(0.0)
sampling_rate = st[0].stats.sampling_rate
geometry = get_station_coordinates(st)/1000. # in km
# 3D Slowness vector
s_x, s_y = resolve_slowness_vector(s, baz)
s_z = s / np.tan(degrees_to_radians(theta))
#shifts.append(0)
# For each station, get distance from array reference point (first station), and the angular displacement clockwise from north
for station in geometry:
r_x = station[0] # x-component of position vector
r_y = station[1] # y-component of position vector
r_z = station[2] # z-component of position vector
delta_t = np.dot([r_x, r_y, r_z], [s_x, s_y, s_z])
shift = int(round(delta_t * sampling_rate))
shifts.append(shift)
return shifts
def linear_stack(st, s, baz):
'''
Returns the linear (delay-and-sum) stack for a seismic array, for a beam of given slowness and backazimuth.
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
s : float
Magnitude of slowness vector, in s / km
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
Returns
-------
stack : NumPy array
The delay-and-sum beam at the given slowness and backazimuth, as a time series.
'''
# Check that each channel has the same number of samples, otherwise we can't construct the beam properly
assert len(set([len(tr) for tr in st])) == 1, "Traces in stream have different lengths, cannot stack."
nsta = len(st)
shifts = get_shifts(st, s, baz)
shifted_st = st.copy()
for i, tr in enumerate(shifted_st):
tr.data = np.roll(tr.data, shifts[i])
stack = np.sum([tr.data for tr in shifted_st], axis=0) / nsta
return stack
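# Usage sketch (hedged, same assumptions about `st` as in get_shifts above):
#
#   # beam = linear_stack(st, s=0.08, baz=45.0)
#   # `beam` has the same number of samples as each input trace.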
def nth_root_stack(st, s, baz, n):
'''
Returns the nth root stack for a seismic array, for a beam of given slowness and backazimuth.
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
s : float
Magnitude of slowness vector, in s / km
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
n : int
Order of the nth root process (n=1 just yields the linear vespa)
Returns
-------
stack : NumPy array
The nth root beam at the given slowness and backazimuth, as a time series.
'''
# Check that each channel has the same number of samples, otherwise we can't construct the beam properly
assert len(set([len(tr) for tr in st])) == 1, "Traces in stream have different lengths, cannot stack."
nsta = len(st)
shifts = get_shifts(st, s, baz)
stack = np.zeros(st[0].data.shape)
for i, tr in enumerate(st):
stack += np.roll(pow(abs(tr.data), 1./n) * np.sign(tr.data), shifts[i]) # Shift data in each trace by its offset
stack /= nsta
stack = pow(abs(stack), n) * np.sign(stack)
return stack
def phase_weighted_stack(st, s, baz, n=1):
'''
Calculates the phase-weighted stack for seismograms in the stream. n is the order of the phase-weighting.
n should be an integer >= 0. n = 0 corresponds with no phase weighting, i.e. just the linear stack.
Parameters
----------
st: ObsPy Stream object
The stream of seismograms for the array for a particular event
s : float
Magnitude of slowness vector, in s / km
baz : float
Backazimuth of slowness vector, (i.e. angle from North back to epicentre of event)
n : number
Order of the phase-weighted stacking to be applied, default 1. Should be int, n >= 0.
Returns
-------
stack : NumPy array
Phase-weighted stack for the given event at the array
Notes
-----
The phase-weighted stack weights the data from each seismogram by its
# repo: highmore9501/fretDance - file: chordToFinger.py
from calculate import arrangeNotesInChord
def copyNewDancer(dancer):
"""
Copy the original dancer and lift all of its fingers.
:param dancer:
:return:
"""
import copy
newDancer = copy.deepcopy(dancer)
newDancer.releaseFingers()
return newDancer
def getChordList(chordPosition):
"""
Split the chord note positions chordPosition into the fretted note positions chordList and the open-string notes noPress, to simplify later processing.
:param chordPosition:
:return:
"""
chordList = list(chordPosition)
chordLength = len(chordList)
noPress = []
for i in range(chordLength - 1, -1, -1):
if chordList[i][1] == 0:
noPress.append(chordList.pop(i))
return chordList, noPress
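# Worked example (follows directly from the loop above): notes with fret 0 are
# open strings and are moved to noPress, the rest stay in chordList.
#
#   chordList, noPress = getChordList([[6, 3], [5, 2], [1, 0]])
#   # chordList == [[6, 3], [5, 2]], noPress == [[1, 0]]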
def fingerNoteComb(dancer, chordPosition, fingerList, usedFinger=None, ESN=None):
"""
:param ESN: empty string notes, i.e. open strings.
    These are the open strings of the original parent chord; unlike the chord inside this function, which has already had its open strings filtered out.
:param usedFinger: list of other fingers that are already in use
:param dancer: the original dancer
:param chordPosition: list of note positions to be pressed by several fingers; contains no open strings
:param fingerList: list of fingers that may be used, e.g. [2, 3, 4] means fingers 2/3/4
:return: list of all dancers generated after the individual presses
"""
if ESN is None:
ESN = []
if usedFinger is None:
usedFinger = []
result = []
resultAppend = result.append
noteNumber = len(chordPosition)
realFingerList = fingerList + usedFinger
from itertools import combinations
import copy
for fingerComb in combinations(fingerList, noteNumber):
newDancer = copy.deepcopy(dancer)
for i in range(noteNumber):
newDancer.fingerMoveTo(fingerComb[i], chordPosition[i][0], chordPosition[i][1])
newDancer.recordTrace(realFingerList, ESN)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
return result
def chord2Finger00(dancer, chordPosition):
"""处理[0],也就是全部空弦音的情况,输出结果1个"""
newDancer = copyNewDancer(dancer)
newTrace = []
for [string, fret] in chordPosition:
newTrace.append([string, 0])
newDancer.traceNote.append(newTrace)
newDancer.traceFinger.append([0])
return newDancer
def chord2Finger01(dancer, chordPosition):
"""处理[1],输出结果4个,分别用1/2/3/4指单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
string = chordList[0][0]
fret = chordList[0][1]
for i in range(4):
newDancer = copyNewDancer(dancer)
newDancer.fingerMoveTo(i + 1, string, fret)
newDancer.recordTrace([i + 1], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
return result
def chord2Finger02(dancer, chordPosition):
"""处理[2],输出结果3个,输出结果4个,就是1/3/4指大横按或1指小横按,
加上输出结果6个,4指对2点组合单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
fret = chordList[0][1]
for i in range(2): # finger 1 full/partial barre
for string in range(chordList[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, fret, i + 2)
newDancer.recordTrace([1], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for i in range(2): # finger 3/4 full barre
for string in range(chordList[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(i + 2, string, fret, 2)
newDancer.recordTrace([i + 2], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
newDancer = copyNewDancer(dancer)
singlePressDancer = fingerNoteComb(newDancer, chordPosition, [1, 2, 3, 4], ESN=noPress) # fingers 1/2/3/4 press the 2 chord notes individually
result += singlePressDancer
return result
def chord2Finger03(dancer, chordPosition):
"""处理[1,1],输出结果6个,4指对2点组合单按"""
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
newDancer = copyNewDancer(dancer)
result = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress)
return result
def chord2Finger04(dancer, chordPosition):
"""处理[3],输出结果4个,就是1/3/4指大横按或1指小横按,
加上输出结果4个,4指对3点组合单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByString = arrangeNotesInChord(chordList, 'string')
fret = newChordByString[0][1]
for i in range(2): # finger 1 full/partial barre
for string in range(newChordByString[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, fret, i + 2)
newDancer.recordTrace([1], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for i in range(2): # finger 3/4 full barre
for string in range(newChordByString[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(i + 2, string, fret, 2)
newDancer.recordTrace([i + 2], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
newDancer = copyNewDancer(dancer)
singlePressDancer = fingerNoteComb(newDancer, newChordByString, [1, 2, 3, 4], ESN=noPress) # the 4 fingers press the 3 notes in individual combinations
result += singlePressDancer
return result
def chord2Finger05(dancer, chordPosition):
"""处理[2,1],输出结果6个,1指横按/小横按,2/3/4指单按;
加上出结果4个,4指对3点组合单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
fret = newChordByFret[0][1]
for i in range(2): # finger 1 full/partial barre on the lowest fret, finger 2/3/4 presses the highest fret
for fingerNumber in range(2, 5):
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, fret, i + 2)
newDancer.fingerMoveTo(fingerNumber, newChordByFret[2][0], newChordByFret[2][1])
newDancer.recordTrace([1, fingerNumber], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
newDancer = copyNewDancer(dancer)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress) # the 4 fingers press the 3 notes in individual combinations
result += singlePressDancer
return result
def chord2Finger06(dancer, chordPosition):
"""处理[1,2],输出结果2个,3指大横按,1/2指单按;
加上输出结果3个,4指小横按,1/2/3指单按;
加上出结果4个,4指对3点组合单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
fret = newChordByFret[1][1]
for fingerNumber in range(1, 3): # finger 3 full barre, finger 1/2 presses the single note
for string in range(newChordByFret[1][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(3, string, fret, 2)
newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
newDancer.recordTrace([3, fingerNumber], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for fingerNumber in range(1, 4): # finger 4 full barre, finger 1/2/3 presses the single note
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(4, string, fret, 2)
newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
newDancer.recordTrace([4, fingerNumber], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
newDancer = copyNewDancer(dancer)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress) # the 4 fingers press the 3 notes in individual combinations
result += singlePressDancer
return result
def chord2Finger07(dancer, chordPosition):
"""处理[1,1,1],输出结果4个,品格从低到高分别用1/2/3/4指,单按3个音"""
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
newDancer = copyNewDancer(dancer)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress) # the 4 fingers press the 3 notes in individual combinations
return singlePressDancer
def chord2Finger08(dancer, chordPosition):
"""处理[4],[5],[6],输出结果1个,就是1指横按"""
chordList, noPress = getChordList(chordPosition)
for string in range(chordList[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, chordList[0][1], 2)
newDancer.recordTrace([1], noPress)
if newDancer.validation(chordPosition):
return newDancer
def chord2Finger09(dancer, chordPosition):
"""处理[1,3],输出结果1个,1指按最低品,2/3/4指根据弦数从低到高单按;
加上输出结果2个,3指小横按,1/2指单按;加上输出结果3个,4指小横按,1/2/3指单按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
fret = newChordByFret[1][1]
for fingerNumber in range(1, 3): # finger 3 full barre, finger 1/2 presses the single note
for string in range(newChordByFret[1][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(3, string, fret, 2)
newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
newDancer.recordTrace([3, fingerNumber], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for fingerNumber in range(1, 4): # finger 4 full barre, finger 1/2/3 presses the single note
for string in range(newChordByFret[1][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(4, string, fret, 2)
newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
newDancer.recordTrace([4, fingerNumber], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for i in range(1): # fingers 1/2/3/4 each press one of the four notes
newDancer = copyNewDancer(dancer)
newDancer.fingerMoveTo(1, newChordByFret[0][0], newChordByFret[0][1])
newDancer.fingerMoveTo(2, newChordByFret[1][0], fret)
newDancer.fingerMoveTo(3, newChordByFret[2][0], fret)
newDancer.fingerMoveTo(4, newChordByFret[3][0], fret)
newDancer.recordTrace([1, 2, 3, 4], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
return result
def chord2Finger10(dancer, chordPosition):
"""处理[2,2],输出结果1个,1/2指按2个低音,3/4指按2个高音,
加上输出6个结果,1指大/小横按,23/24/34指单按2个单音,
加上输出2个结果,1指大横按,3/4指小横按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(0, 1): # finger 1 full/partial barre, fingers 2/3/4 press the 2 single notes
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[2:], [2, 3, 4], ESN=noPress)
result += singlePressDancer
for i in range(0, 1): # finger 1 full barre, finger 3/4 partial barre
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
newDancer.recordTrace([1, i + 3], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
newDancer = copyNewDancer(dancer)
for i in range(4): # fingers 1/2 press the 2 low notes, fingers 3/4 the 2 high notes
newDancer.fingerMoveTo(i + 1, newChordByFret[i][0], newChordByFret[i][1])
newDancer.recordTrace([1, 2, 3, 4], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
return result
def chord2Finger11(dancer, chordPosition):
"""处理[3,1],[4,1],[5,1]输出结果3个,1指大横按,2/3/4指单按"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer) # finger 1 full barre
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
singlePressDancer = fingerNoteComb(newDancer, [newChordByFret[-1]], [2, 3, 4], ESN=noPress) # finger 2/3/4 presses the single note
result += singlePressDancer
return result
def chord2Finger12(dancer, chordPosition):
"""处理[1,1,2],[1,1,3],输出结果2个,3/4指大横按,1/2指单按两个音"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(2):
for string in range(newChordByFret[2][0], 7):
newDancer = copyNewDancer(dancer) # finger 3/4 full barre
newDancer.changeBarre(i + 3, string, newChordByFret[2][1], 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[:2], [1, 2], ESN=noPress) # fingers 1/2 press the 2 lower notes individually
result += singlePressDancer
return result
def chord2Finger13(dancer, chordPosition):
"""处理[1,2,1],[1,1,1,1],输出结果1个,品格从低到高分别用1234"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
newDancer = copyNewDancer(dancer)
for i in range(4):
newDancer.fingerMoveTo(i + 1, newChordByFret[i][0], newChordByFret[i][1])
newDancer.recordTrace([1, 2, 3, 4], noPress)
if newDancer.validation(chordPosition):
result.append(newDancer)
return result
def chord2Finger14(dancer, chordPosition):
"""处理[2,1,1],[3,1,1],输出结果6个,1指横按/小横按,2/3/4指按2个单音"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
# finger 1 full barre, fingers 2/3/4 press the 2 single notes
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[-2:], [2, 3, 4], [1], noPress)
result += singlePressDancer
return result
def chord2Finger15(dancer, chordPosition):
"""处理[3,1,1,1],[2,1,1,1],输出结果2个,1指大/小横按,2/3/4指单按3个音"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(0, 1): # finger 1 full/partial barre, fingers 2/3/4 press the single notes
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[i + 2:], [2, 3, 4], [1], noPress)
result += singlePressDancer
return result
def chord2Finger16(dancer, chordPosition):
"""处理[1,4],输出结果4个,3/4指大横按,1/2指单按低音"""
result = []
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(0, 1): # finger 3/4 full barre, finger 1/2 presses the low note
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(i + 3, string, newChordByFret[0][1], 2)
singlePressDancer = fingerNoteComb(newDancer, [newChordByFret[0]], [1, 2], [i + 3], noPress)
result += singlePressDancer
return result
def chord2Finger17(dancer, chordPosition):
"""处理[2,3],输出结果2个,1指大横按,3/4指大横按"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(0, 1): # finger 1 full barre, finger 3/4 partial barre
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
newDancer.recordTrace([1, i + 3], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
return result
def chord2Finger18(dancer, chordPosition):
"""处理[3,2],输出结果2个,1指大横按,3/4指大横按,
加上输出6个结果,1指大/小横按,23/24/34指单按2个单音"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for i in range(0, 1): # finger 1 full barre, finger 3/4 partial barre
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
newDancer.recordTrace([1, i + 3], noPress)
if newDancer.validation(chordPosition):
resultAppend(newDancer)
for i in range(0, 1): # finger 1 full/partial barre, fingers 2/3/4 press the 2 single notes
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[3:], [2, 3, 4], [1], noPress)
result += singlePressDancer
return result
def chord2Finger19(dancer, chordPosition):
"""处理[4,2],输出结果2个,1指大横按,3/4指大横按,加上输出3个结果,1指大横按,23/24/34指单按2个单音"""
result = []
resultAppend = result.append
chordList, noPress = getChordList(chordPosition)
newChordByFret = arrangeNotesInChord(chordList, 'fret')
for string in range(newChordByFret[0][0], 7):
newDancer = copyNewDancer(dancer)
newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
singlePressDancer = fingerNoteComb(newDancer, newChordByFret[4:],
self.blockSignals(False)
return
except Exception as e:
log.debug(str(e))
self.app.inform.emit('[success] %s' % _("Tool(s) deleted from Tool Table."))
self.blockSignals(False)
self.build_ui()
def on_generate_buffer(self):
self.app.inform.emit('[WARNING_NOTCL] %s...' % _("Buffering solid geometry"))
self.obj_name = self.ui.object_combo.currentText()
# Get source object.
try:
self.grb_obj = self.app.collection.get_by_name(self.obj_name)
except Exception as e:
self.app.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Could not retrieve object"), str(self.obj_name)))
return "Could not retrieve object: %s with error: %s" % (self.obj_name, str(e))
if self.grb_obj is None:
self.app.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Object not found"), str(self.obj_name)))
return
def buffer_task(app_obj):
with app_obj.proc_container.new('%s...' % _("Buffering")):
if isinstance(self.grb_obj.solid_geometry, list):
self.grb_obj.solid_geometry = MultiPolygon(self.grb_obj.solid_geometry)
self.grb_obj.solid_geometry = self.grb_obj.solid_geometry.buffer(0.0000001)
self.grb_obj.solid_geometry = self.grb_obj.solid_geometry.buffer(-0.0000001)
app_obj.inform.emit('[success] %s' % _("Done."))
self.grb_obj.plot_single_object.emit()
self.app.worker_task.emit({'fcn': buffer_task, 'params': [self.app]})
def on_iso_button_click(self):
self.obj_name = self.ui.object_combo.currentText()
# Get source object.
try:
self.grb_obj = self.app.collection.get_by_name(self.obj_name)
except Exception:
self.app.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Could not retrieve object"), str(self.obj_name)))
return
if self.grb_obj is None:
self.app.inform.emit('[ERROR_NOTCL] %s: %s' % (_("Object not found"), str(self.obj_name)))
return
if self.ui.valid_cb.get_value() is True:
self.find_safe_tooldia_multiprocessing()
def worker_task(iso_obj):
with self.app.proc_container.new('%s ...' % _("Isolating")):
self.isolate_handler(iso_obj)
self.app.worker_task.emit({'fcn': worker_task, 'params': [self.grb_obj]})
def follow_geo(self, followed_obj, outname):
"""
Creates a geometry object "following" the gerber paths.
:param followed_obj: Gerber object for which to generate the follow geometry
:type followed_obj: AppObjects.FlatCAMGerber.GerberObject
:param outname: Name of the resulting Geometry object
:type outname: str
:return: None
"""
def follow_init(follow_obj, app_obj):
# Propagate options
follow_obj.options["cnctooldia"] = str(tooldia)
follow_obj.solid_geometry = self.grb_obj.follow_geometry
app_obj.inform.emit('[success] %s.' % _("Following geometry was generated"))
# in the end toggle the visibility of the origin object so we can see the generated Geometry
followed_obj.ui.plot_cb.set_value(False)
follow_name = outname
for tool in self.iso_tools:
tooldia = self.iso_tools[tool]['tooldia']
new_name = "%s_%.*f" % (follow_name, self.decimals, tooldia)
follow_state = self.iso_tools[tool]['data']['tools_iso_follow']
if follow_state:
ret = self.app.app_obj.new_object("geometry", new_name, follow_init)
if ret == 'fail':
self.app.inform.emit("[ERROR_NOTCL] %s: %.*f" % (
_("Failed to create Follow Geometry with tool diameter"), self.decimals, tooldia))
else:
self.app.inform.emit("[success] %s: %.*f" % (
_("Follow Geometry was created with tool diameter"), self.decimals, tooldia))
def isolate_handler(self, isolated_obj):
"""
Creates a geometry object with paths around the gerber features.
:param isolated_obj: Gerber object for which to generate the isolating routing geometry
:type isolated_obj: AppObjects.FlatCAMGerber.GerberObject
:return: None
"""
selection = self.ui.select_combo.get_value()
if selection == 0: # ALL
self.isolate(isolated_obj=isolated_obj)
elif selection == 1: # Area Selection
self.app.inform.emit('[WARNING_NOTCL] %s' % _("Click the start point of the area."))
if self.app.is_legacy is False:
self.app.plotcanvas.graph_event_disconnect('mouse_press', self.app.on_mouse_click_over_plot)
self.app.plotcanvas.graph_event_disconnect('mouse_move', self.app.on_mouse_move_over_plot)
self.app.plotcanvas.graph_event_disconnect('mouse_release', self.app.on_mouse_click_release_over_plot)
else:
self.app.plotcanvas.graph_event_disconnect(self.app.mp)
self.app.plotcanvas.graph_event_disconnect(self.app.mm)
self.app.plotcanvas.graph_event_disconnect(self.app.mr)
self.mr = self.app.plotcanvas.graph_event_connect('mouse_release', self.on_mouse_release)
self.mm = self.app.plotcanvas.graph_event_connect('mouse_move', self.on_mouse_move)
self.kp = self.app.plotcanvas.graph_event_connect('key_press', self.on_key_press)
# disconnect flags
self.area_sel_disconnect_flag = True
elif selection == 2: # Polygon Selection
# disengage the grid snapping since it may be hard to click on polygons with grid snapping on
if self.app.ui.grid_snap_btn.isChecked():
self.grid_status_memory = True
self.app.ui.grid_snap_btn.trigger()
else:
self.grid_status_memory = False
self.app.inform.emit('[WARNING_NOTCL] %s' % _("Click on a polygon to isolate it."))
self.mr = self.app.plotcanvas.graph_event_connect('mouse_release', self.on_poly_mouse_click_release)
self.kp = self.app.plotcanvas.graph_event_connect('key_press', self.on_key_press)
if self.app.is_legacy is False:
self.app.plotcanvas.graph_event_disconnect('mouse_release',
self.app.on_mouse_click_release_over_plot)
else:
self.app.plotcanvas.graph_event_disconnect(self.app.mr)
# disconnect flags
self.poly_sel_disconnect_flag = True
elif selection == 3: # Reference Object
ref_obj = self.app.collection.get_by_name(self.ui.reference_combo.get_value())
ref_geo = unary_union(ref_obj.solid_geometry)
use_geo = unary_union(isolated_obj.solid_geometry).difference(ref_geo)
self.isolate(isolated_obj=isolated_obj, geometry=use_geo)
def isolate(self, isolated_obj, geometry=None, limited_area=None, negative_dia=None, plot=True):
"""
Creates an isolation routing geometry object in the project.
:param isolated_obj: Gerber object for which to generate the isolating routing geometry
:type isolated_obj: AppObjects.FlatCAMGerber.GerberObject
:param geometry: specific geometry to isolate
:type geometry: List of Shapely polygon
:param limited_area: if not None isolate only this area
:type limited_area: Shapely Polygon or a list of them
:param negative_dia: isolate the geometry with a negative value for the tool diameter
:type negative_dia: bool
:param plot: if to plot the resulting geometry object
:type plot: bool
:return: None
"""
combine = self.ui.combine_passes_cb.get_value()
tools_storage = self.iso_tools
sorted_tools = []
table_items = self.ui.tools_table.selectedItems()
sel_rows = {t.row() for t in table_items}
for row in sel_rows:
tid = int(self.ui.tools_table.item(row, 3).text())
sorted_tools.append(tid)
if not sorted_tools:
self.app.inform.emit('[ERROR_NOTCL] %s' % _("There are no tools selected in the Tool Table."))
return 'fail'
# update the Common Parameters values in the self.iso_tools
for tool_iso in self.iso_tools:
for key in self.iso_tools[tool_iso]:
if key == 'data':
self.iso_tools[tool_iso][key]["tools_iso_rest"] = self.ui.rest_cb.get_value()
self.iso_tools[tool_iso][key]["tools_iso_combine_passes"] = combine
self.iso_tools[tool_iso][key]["tools_iso_isoexcept"] = self.ui.except_cb.get_value()
self.iso_tools[tool_iso][key]["tools_iso_selection"] = self.ui.select_combo.get_value()
self.iso_tools[tool_iso][key]["tools_iso_area_shape"] = self.ui.area_shape_radio.get_value()
if combine:
if self.ui.rest_cb.get_value():
self.combined_rest(iso_obj=isolated_obj, iso2geo=geometry, tools_storage=tools_storage,
lim_area=limited_area, negative_dia=negative_dia, plot=plot)
else:
self.combined_normal(iso_obj=isolated_obj, iso2geo=geometry, tools_storage=tools_storage,
lim_area=limited_area, negative_dia=negative_dia, plot=plot)
else:
prog_plot = self.app.defaults["tools_iso_plotting"]
for tool in sorted_tools:
tool_data = tools_storage[tool]['data']
to_follow = tool_data['tools_iso_follow']
work_geo = geometry
if work_geo is None:
work_geo = isolated_obj.follow_geometry if to_follow else isolated_obj.solid_geometry
iso_t = {
'ext': 0,
'int': 1,
'full': 2
}[tool_data['tools_iso_isotype']]
passes = tool_data['tools_iso_passes']
overlap = tool_data['tools_iso_overlap']
overlap /= 100.0
milling_type = tool_data['tools_iso_milling_type']
iso_except = self.ui.except_cb.get_value()
for i in range(passes):
tool_dia = tools_storage[tool]['tooldia']
tool_type = tools_storage[tool]['tool_type']
iso_offset = tool_dia * ((2 * i + 1) / 2.0000001) - (i * overlap * tool_dia)
if negative_dia:
iso_offset = -iso_offset
outname = "%s_%.*f" % (isolated_obj.options["name"], self.decimals, float(tool_dia))
if passes > 1:
iso_name = outname + "_iso" + str(i + 1)
if iso_t == 0:
iso_name = outname + "_ext_iso" + str(i + 1)
elif iso_t == 1:
iso_name = outname + "_int_iso" + str(i + 1)
else:
iso_name = outname + "_iso"
if iso_t == 0:
iso_name = outname + "_ext_iso"
elif iso_t == 1:
iso_name = outname + "_int_iso"
# if milling type is climb then the move is counter-clockwise around features
mill_dir = 1 if milling_type == 'cl' else 0
iso_geo = self.generate_envelope(iso_offset, mill_dir, geometry=work_geo, env_iso_type=iso_t,
follow=to_follow, nr_passes=i, prog_plot=prog_plot)
if iso_geo == 'fail':
self.app.inform.emit('[ERROR_NOTCL] %s' % _("Isolation geometry could not be generated."))
continue
# ############################################################
# ########## AREA SUBTRACTION ################################
# ############################################################
if iso_except:
self.app.proc_container.update_view_text(' %s' % _("Subtracting Geo"))
iso_geo = self.area_subtraction(iso_geo)
if limited_area:
self.app.proc_container.update_view_text(' %s' % _("Intersecting Geo"))
iso_geo = self.area_intersection(iso_geo, intersection_geo=limited_area)
# make sure that no empty geometry element is in the solid_geometry
new_solid_geo = [geo for geo in iso_geo if not geo.is_empty]
tool_data.update({
"name": iso_name,
})
def iso_init(geo_obj, fc_obj):
# Propagate options
geo_obj.options["cnctooldia"] = str(tool_dia)
geo_obj.solid_geometry = deepcopy(new_solid_geo)
# ############################################################
# ########## AREA SUBTRACTION ################################
# ############################################################
if self.ui.except_cb.get_value():
self.app.proc_container.update_view_text(' %s' % _("Subtracting Geo"))
geo_obj.solid_geometry = self.area_subtraction(geo_obj.solid_geometry)
geo_obj.tools = {'1': {}}
geo_obj.tools.update({
'1': {
'tooldia': float(tool_dia),
'offset': 'Path',
'offset_value': 0.0,
'type': 'Rough',
'tool_type': tool_type,
'data': tool_data,
'solid_geometry': geo_obj.solid_geometry
}
})
# detect if solid_geometry is empty and this require list flattening which is "heavy"
# or just looking in the lists (they are one level depth) and if any is not empty
# proceed with object creation, if there are empty and the number of them is the length
# of the list then we have an empty solid_geometry which should raise a Custom Exception
empty_cnt = 0
if not isinstance(geo_obj.solid_geometry, list):
geo_obj.solid_geometry = [geo_obj.solid_geometry]
for g in geo_obj.solid_geometry:
if g:
break
else:
empty_cnt += 1
if empty_cnt == len(geo_obj.solid_geometry):
fc_obj.inform.emit('[ERROR_NOTCL] %s: %s' % (
_("Empty Geometry in"), geo_obj.options["name"]))
return 'fail'
else:
fc_obj.inform.emit('[success] %s: %s' %
(_("Isolation geometry created"), geo_obj.options["name"]))
geo_obj.multigeo = True
self.app.app_obj.new_object("geometry", iso_name, iso_init, plot=plot)
# clean the progressive plotted shapes if it was used
if prog_plot == 'progressive':
self.temp_shapes.clear(update=True)
# Switch notebook to Properties page
self.app.ui.notebook.setCurrentWidget(self.app.ui.properties_tab)
def combined_rest(self, iso_obj, iso2geo, tools_storage, lim_area, negative_dia=None, plot=True):
"""
Isolate the provided Gerber object using "rest machining" strategy
:param iso_obj: the isolated Gerber object
:type iso_obj: AppObjects.FlatCAMGerber.GerberObject
:param iso2geo: specific geometry to isolate
:type iso2geo: list of Shapely Polygon
:param tools_storage: a dictionary that holds the tools and geometry
:type tools_storage: dict
:param lim_area: if not None restrict isolation to this area
:type lim_area: Shapely Polygon or a list of them
:param negative_dia: isolate the geometry with a negative value for the tool diameter
:type negative_dia: bool
:param plot: if to plot the resulting geometry object
:type plot: bool
:return: Isolated solid geometry
:rtype:
"""
log.debug("ToolIsolation.combine_rest()")
total_solid_geometry = []
iso_name = iso_obj.options["name"] + '_iso_rest'
work_geo = iso_obj.solid_geometry if iso2geo is None else iso2geo
# sorted_tools = []
# for k, v in self.iso_tools.items():
# sorted_tools.append(float('%.*f' % (self.decimals, float(v['tooldia']))))
sorted_tools = []
table_items = self.ui.tools_table.selectedItems()
sel_rows = {t.row() for t in table_items}
for row in sel_rows:
try:
tdia = float(self.ui.tools_table.item(row, 1).text())
except ValueError:
# try to convert comma to
"""
Morgan.
authors: <NAME> and <NAME>
contact: dangeles at caltech edu
"""
import pandas as pd
import warnings as wng
import numpy as np
import pymc3 as pm
# import theano
###############################################################################
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
###############################################################################
class hunt(object):
"""morgan objects are used for genetic analysis using RNA-seq.
Each genotype can be associated with two attributes: Read counts
and log(fold-change). These attributes are provided in two
different dataframes. If you provide a dataframe with fold-change
(not log-foldchange) certain functions will not work correctly!
Attributes:
------------------
gene
change
counts
qval
q
"""
def __init__(self, gene, change, counts, qval, q=0.1):
"""
The initialize function.
Params:
gene
change
counts
qval
q
"""
if not gene:
raise ValueError('`gene` cannot be empty')
if not change:
raise ValueError('`change` cannot be empty')
if not counts:
raise ValueError('`counts` cannot be empty')
if not qval:
raise ValueError('`qval` cannot be empty')
if type(gene) is not str:
raise ValueError('`gene` must be a string')
if type(change) is not str:
raise ValueError('`change` must be a string')
if type(counts) is not str:
raise ValueError('`counts` must be a string')
if type(qval) is not str:
raise ValueError('`qval` must be a string')
if type(q) is not float:
raise ValueError('`q` must be a float')
if q <= 0 or q >= 1:
raise ValueError('`q` must be between 0 and 1')
self.gene = gene
self.change = change
self.counts = counts
self.qval = qval
self.q = q
self.single_mutants = []
self.double_muts = {}
self.beta = None
def add_single_mutant(self, single):
"""
Add a single mutant to the list.
Params:
single - str or listlike
Note: ALL letter codes are coerced to lowercase!
"""
if type(single) not in [str, list]:
raise ValueError('`single` must be a str or list of strings')
if type(single) is str:
self.single_mutants += [single.lower()]
if type(single) is list:
self.single_mutants += [x.lower() for x in single]
self.single_mutants = list(set(self.single_mutants))
def add_double_mutants(self, lettercode, genotype):
"""
A method that adds double mutants codes to a dictionary.
Params:
---------
lettercode - str or list, contains the code by which the double
mutant will be referred to
genotype - str or list, contains the genotype that lettercode refers
to
i.e.
{a: bc} - the lettercode is a, and the genotype is b(minus)c(minus)
Output:
appends to the double mutant dictionary.
"""
if type(lettercode) != type(genotype):
raise ValueError('types of lettercode and genotype must match!')
if type(lettercode) is not str:
if len(lettercode) != len(genotype):
raise ValueError('lengths of lettercode\
and genotype must match!')
if type(lettercode) is str:
if lettercode.lower() in self.double_muts.keys():
w = '{0} is already in string\
and was replaced'.format(lettercode.lower())
wng.warn(w)
self.double_muts[lettercode.lower()] = genotype.lower()
return
for i, letter in enumerate(lettercode):
if letter.lower() in self.double_muts.keys():
w = '{0} is already in string\
and was replaced'.format(letter.lower())
wng.warn(w)
self.double_muts[letter.lower()] = genotype[i].lower()
def add_genmap(self, genmap_path, sep=',', comment='#'):
"""
Add a genmap path to this object.
The genmap file must have exactly three columns:
project_name - the name of each RNA-seq run
genotype - typically, each genotype has n replicates
with n project_name's
batch - batch each project belonged to
I.e.:
run1,WT,batch1
run2,WT,batch1
run3,WT,batch2
run4,mut,batch1
run5,mut,batch1
run6,mut,batch2
Params:
genmap_path - path (including filename) to genmap file
sep - separator used to make genmap
comment - if there are comments, marker used to define comments
"""
self.genmap = pd.read_csv(genmap_path, sep=sep, comment=comment)
columns = ['project_name', 'genotype', 'batch']
if (self.genmap.columns != columns).any():
raise ValueError('genmap is not in the right format!')
self.genmap.genotype = self.genmap.genotype.apply(str)
# make sure everything is always in lowercase
self.genmap.genotype = self.genmap.genotype.apply(str.lower)
def add_tpm(self, main_path, tpm_fname, folder='', sep='\t'):
"""
Add tpm files.
main_path - path where all the tpm files are kept
tpm_fname - generic name of all tpm files (i.e., tpm.csv)
folder - if there are any subfolders to get to tpm_fname, go here
sep - separator used in tpm files
i.e.:
main_path -> genmap.project_name[0] -> folder -> tpm_fname
main_path -> genmap.project_name[1] -> folder -> tpm_fname
returns:
self.tpm - a dictionary (project_name, df)
"""
self.tpm = {} # initialize an empty hash
# get tpm for each project
for prjct in self.genmap.project_name.unique():
path = main_path + prjct + folder + tpm_fname
self.tpm[prjct] = pd.read_csv(path, sep=sep)
self.tpm[prjct].sort_values(self.gene, inplace=True)
self.tpm[prjct].reset_index(drop=True, inplace=True)
def add_betas(self, main_path, fc_fname, folders, sep=','):
"""
Add fold change dfs.
Params:
-------------------------
main_path - str, path to each processed read folder
folders - dict, where keys are the genotypes and the values
are the names of the folders the genotype is in
fc_fname - str, standard name of the fold-change data
sep - separators between columns
Output:
-------------------------
self.beta - dictionary of dataframes
"""
if type(folders) is not dict:
raise ValueError('`folders` must be a dict')
self.beta = {} # empty hash
# get betas for each genotype comparison:
for genotype in folders.keys():
path = main_path + folders[genotype] + fc_fname
self.beta[genotype] = pd.read_csv(path, sep=sep)
# beta dataframes from sleuth MUST BE SORTED By ID!!!!
self.beta[genotype].sort_values(self.gene, inplace=True)
self.beta[genotype].reset_index(drop=True, inplace=True)
def add_beta(self, fname, key, **kwargs):
"""A function to add a file to the beta dictionary."""
if self.beta is None:
self.beta = {}
self.beta[key] = pd.read_csv(fname, **kwargs)
def set_qval(self, q=0.1):
"""A function to set the qvalue parameter."""
if type(q) is not float:
raise ValueError('`q` must be a float!')
if q <= 0 or q >= 1:
raise ValueError('`q` must be between 0 and 1, noninclusive')
self.q = q
def filter_data(self):
"""
A function to filter out NaNs in the beta dataframes.
Params:
count_min - int or float
count_quantile - float
outputs:
filtered_tpm
filtered_beta
"""
for genotype, df in self.beta.items():
df.dropna(subset=['b'], inplace=True)
###############################################################################
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
###############################################################################
# some common functions
def find_rank(morgan, df):
"""A function to find the rank values of a variable."""
d = df.copy()
d.sort_values('b', inplace=True)
rank = np.linspace(0, len(d)-1, len(d))
d['r'] = rank
d.sort_values(morgan.gene, inplace=True)
return d
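# Usage sketch added for illustration (not part of the original module). It shows the
# shape of the dataframe find_rank expects: a column named by `morgan.gene` plus a 'b'
# column of regression coefficients; the toy gene names and values below are made up.
def _example_find_rank(morgan_obj):
    """Rank-transform a toy beta dataframe with find_rank."""
    toy = pd.DataFrame({morgan_obj.gene: ['g1', 'g2', 'g3'],
                        'b': [0.5, -1.2, 2.0]})
    ranked = find_rank(morgan_obj, toy)
    # 'r' holds the rank of each gene by its beta value (0 = most negative)
    return ranked[[morgan_obj.gene, 'b', 'r']]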
def find_inliers(morgan, ovx, ovy, trace):
"""A function to find inliers from the Bayesian regression."""
# find the mean and std of the distribution along the line
mean = np.mean(ovy.r - trace.Intercept.mean() -
ovx.r*trace.x.mean())
std = np.std(ovy.r - trace.Intercept.mean() -
ovx.r*trace.x.mean())
# find the total distribution:
intercept = trace.Intercept.mean()
slope = trace.x.mean()
distribution = ovy.r - intercept - ovx.r*slope
# call the inliers and outliers.
# fairly aggressive -- < 1std is inlier, > is outlier
inliers = (np.abs(distribution - mean)/std < 1)
# get a list of the gene candidates (genes close to line)
candidates = ovy[ovy.r.isin(ovy.r[inliers])][morgan.gene]
return candidates
def robust_regress(data, progress=False):
"""A robust regression using a StudentT instead of a Gaussian model."""
with pm.Model():
family = pm.glm.families.StudentT()
pm.glm.glm('y ~ x', data, family=family)
start = pm.find_MAP()
step = pm.NUTS(scaling=start)
trace_robust = pm.sample(2000, step, progressbar=progress)
return trace_robust
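# Usage sketch added for illustration (not part of the original module). robust_regress
# expects a dict with 'x' and 'y' arrays, exactly as the calling code builds from ranked
# values; the synthetic data below is invented purely to show the call.
def _example_robust_regress():
    """Fit the robust (StudentT) regression on synthetic, outlier-contaminated data."""
    x = np.arange(100)
    y = x + np.random.normal(scale=5, size=100)
    y[:5] = 90  # outliers that a Gaussian likelihood would chase
    trace = robust_regress(dict(x=x, y=y), progress=False)
    # posterior means of the fitted line; the outliers should barely move them
    return trace.Intercept.mean(), trace.x.mean()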
class mcclintock(object):
"""
An object that performs bayesian robust regression on a morgan object.
For single mutant analysis.
Attributes:
------------------
name
robust_slope
primary_weights
secondary_slope
secondary weights
"""
def __init__(self, name, morgan, progress):
"""
Initialize function.
Performs bayesian primary and secondary regression.
"""
self.name = name
self.progress = progress
self.robust_regression_primary(morgan, progress)
self.robust_regression_secondary(morgan, progress)
def mcmc_robust(self, data, progress=True):
"""Bayesian Regression Using PyMC3."""
# with pm.Model() as model_robust:
with pm.Model():
family = pm.glm.families.StudentT()
pm.glm.glm('y ~ x', data, family=family)
start = pm.find_MAP()
step = pm.NUTS(scaling=start)
trace_robust = pm.sample(2000, step, progressbar=progress)
return trace_robust
def robust_regression_primary(self, morgan, alpha=10**-4, progress=True):
"""
A function to perform robust spearmanr analyses on all single mutants.
Params:
alpha - float, significance value for spearmanr correlation
progress - Boolean, show progressbar for mcmc
Outputs:
res_dict - a hash containing the results of the analysis.
"""
def perform_mcmc(morgan, ovx, ovy, mut_a, mut_b, progress=True):
"""
A function to perform the robust spearmanr regress.
Written mainly to avoid running into RAM issues. Not
entirely meant for public use.
ovx, ovy -- dataframes to be correlated
mut_a, mut_b -- genotypes of ovx and ovy
"""
# rank order:
ovx = find_rank(morgan, ovx)
ovy = find_rank(morgan, ovy)
# place in a dict
data = dict(x=ovx.r, y=ovy.r)
# run PyMC3 with student T distribution
# to minimize impact of outliers
print('\nstarting comparison of {0}, {1}'.format(i, j))
trace_robust = robust_regress(data, progress)
# find the mean and std of the distribution along the line
candidates = find_inliers(morgan, ovx, ovy, trace_robust)
timeout
How long to wait for the output to exist before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
return self._load_output(component, timeout=timeout)
def __getitem__(self, item: int) -> Any:
"""Return the output associated with the input index. Does not block."""
return self.get(item, timeout=0)
def get_err(self, component: int, timeout: utils.Timeout = None,) -> errors.ComponentError:
"""
Return the error associated with the input component index.
If the component actually succeeded, this will raise :class:`htmap.exceptions.ExpectedError`.
Parameters
----------
component
The index of the input to get the output for.
timeout
How long to wait for the output to exist before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
return self._load_error(component, timeout=timeout)
def __iter__(self) -> Iterator[Any]:
"""
Iterating over the :class:`htmap.Map` yields the outputs in the same order as the inputs,
waiting on each individual output to become available.
"""
yield from self.iter()
def iter(self, timeout: utils.Timeout = None,) -> Iterator[Any]:
"""
Returns an iterator over the output of the :class:`htmap.Map` in the same order as the inputs,
waiting on each individual output to become available.
Parameters
----------
timeout
How long to wait for each output to be available before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
for component in self.components:
yield self._load_output(component, timeout=timeout)
def iter_with_inputs(
self, timeout: utils.Timeout = None,
) -> Iterator[Tuple[Tuple[tuple, Dict[str, Any]], Any]]:
"""
Returns an iterator over the inputs and output of the :class:`htmap.Map` in the same order as the inputs,
waiting on each individual output to become available.
Parameters
----------
timeout
How long to wait for each output to be available before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
for component in self.components:
output = self._load_output(component, timeout=timeout)
input = self._load_input(component)
yield input, output
def iter_as_available(self, timeout: utils.Timeout = None,) -> Iterator[Any]:
"""
Returns an iterator over the output of the :class:`htmap.Map`,
yielding individual outputs as they become available.
The iteration order is initially random, but is consistent within a single interpreter session once the map is completed.
Parameters
----------
timeout
How long to wait for the entire iteration to complete before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
timeout = utils.timeout_to_seconds(timeout)
start_time = time.time()
remaining_indices = set(self.components)
while len(remaining_indices) > 0:
for component in copy(remaining_indices):
try:
output = self._load_output(component, timeout=0)
remaining_indices.remove(component)
yield output
except exceptions.OutputNotFound:
pass
if timeout is not None and time.time() > start_time + timeout:
raise exceptions.TimeoutError("Timed out while waiting for more output")
time.sleep(settings["WAIT_TIME"])
def iter_as_available_with_inputs(
self, timeout: utils.Timeout = None,
) -> Iterator[Tuple[Tuple[tuple, Dict[str, Any]], Any]]:
"""
Returns an iterator over the inputs and output of the :class:`htmap.Map`,
yielding individual ``(input, output)`` pairs as they become available.
The iteration order is initially random, but is consistent within a single interpreter session once the map is completed.
Parameters
----------
timeout
How long to wait for the entire iteration to complete before raising a :class:`htmap.exceptions.TimeoutError`.
If ``None``, wait forever.
"""
timeout = utils.timeout_to_seconds(timeout)
start_time = time.time()
remaining_indices = set(self.components)
while len(remaining_indices) > 0:
for component in copy(remaining_indices):
try:
output = self._load_output(component, timeout=0)
input = self._load_input(component)
remaining_indices.remove(component)
yield input, output
except exceptions.OutputNotFound:
pass
if timeout is not None and time.time() > start_time + timeout:
raise exceptions.TimeoutError("Timed out while waiting for more output")
time.sleep(settings["WAIT_TIME"])
def iter_inputs(self) -> Iterator[Any]:
"""Returns an iterator over the inputs of the :class:`htmap.Map`."""
return (self._load_input(idx) for idx in self.components)
def _requirements(self, requirements: Optional[str] = None) -> str:
"""Build an HTCondor requirements expression that captures all of the ``cluster_id`` for this map."""
base = f"({' || '.join(f'ClusterId=={cid}' for cid in self._cluster_ids)})"
extra = f" && {requirements}" if requirements is not None else ""
return base + extra
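# Illustrative example (added comment): for a map whose _cluster_ids are [12, 15] and
# requirements='JobStatus==2', this builds
#     (ClusterId==12 || ClusterId==15) && JobStatus==2
# so every query or action below is scoped to this map's own jobs.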
def _query(
self, requirements: Optional[str] = None, projection: Optional[List[str]] = None,
) -> Iterator[classad.ClassAd]:
"""
Perform a _query against the HTCondor cluster to get information about the map jobs.
Parameters
----------
requirements
A ClassAd expression to use as the requirements for the _query.
In addition to whatever restrictions given in this expression, the _query will only target the jobs for this map.
projection
The ClassAd attributes to return from the _query.
Returns
-------
classads
An iterator of matching :class:`classad.ClassAd`, with only the projected fields.
"""
if projection is None:
projection = []
req = self._requirements(requirements)
schedd = condor.get_schedd()
q = schedd.xquery(requirements=req, projection=projection,)
logger.debug(
f'Queried for map {self.tag} (requirements = "{req}") with projection {projection}'
)
yield from q
@property
def component_statuses(self) -> List[state.ComponentStatus]:
"""
Return the current :class:`state.ComponentStatus` of each component in the map.
"""
return self._state.component_statuses
def components_by_status(self) -> Mapping[state.ComponentStatus, Tuple[int, ...]]:
"""
Return the component indices grouped by their states.
Examples
--------
This example finds the completed jobs for a submitted map,
and processes those results:
.. code:: python
from time import sleep
import htmap
def job(x):
sleep(x)
return 1 / x
m = htmap.map(job, [0, 2, 4, 6, 8], tag="foo")
# Wait for all jobs to finish.
# Alternatively, use `futures = htmap.load("foo")` on a different process
sleep(10)
completed = m.components_by_status()[htmap.ComponentStatus.COMPLETED]
for component in completed:
result = m.get(component)
# whatever processing needs to be done with each completed result
print(result)
"""
status_to_components: MutableMapping[
state.ComponentStatus, List[int]
] = collections.defaultdict(list)
for component, status in enumerate(self.component_statuses):
status_to_components[status].append(component)
return {
status: tuple(sorted(components)) for status, components in status_to_components.items()
}
def status(self) -> str:
"""Return a string containing the number of jobs in each status."""
counts = collections.Counter(self.component_statuses)
stat = " | ".join(
f"{str(js)} = {counts[js]}" for js in state.ComponentStatus.display_statuses()
)
msg = f"{self.__class__.__name__} {self.tag} ({len(self)} components): {stat}"
return utils.rstr(msg)
@property
def holds(self) -> Dict[int, holds.ComponentHold]:
"""
A dictionary of component indices to their :class:`Hold` (if they are held).
"""
return self._state.holds
def hold_report(self) -> str:
"""
Return a string containing a formatted table describing any held components.
"""
headers = ["Component", "Code", "Hold Reason"]
rows = [(component, hold.code, hold.reason) for component, hold in self.holds.items()]
return utils.table(
headers=headers, rows=rows, alignment={"Component": "ljust", "Hold Reason": "ljust",},
)
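# Illustrative output of hold_report (added comment; the values are invented):
#     Component | Code | Hold Reason
#     3         | 13   | Transfer input files failure ...
# The exact layout comes from utils.table, so it depends on that helper's formatting.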
@property
def errors(self) -> Dict[int, errors.ComponentError]:
"""
A dictionary of component indices to their :class:`ExecutionError`
(if that component experienced an error).
"""
err = {}
for idx in self.components:
try:
err[idx] = self.get_err(idx)
except (
exceptions.OutputNotFound,
exceptions.ExpectedError,
exceptions.MapComponentHeld,
) as e:
pass
return err
def error_reports(self) -> Iterator[str]:
"""
Yields the error reports for any components that experienced an error during execution.
"""
for idx in self.components:
try:
yield self.get_err(idx, timeout=0).report()
except (
exceptions.OutputNotFound,
exceptions.ExpectedError,
exceptions.TimeoutError,
exceptions.MapComponentHeld,
) as e:
pass
@property
def memory_usage(self) -> List[int]:
"""
Return the latest peak memory usage of each map component, measured in MB.
A component that hasn't reported yet will show a ``0``.
.. warning::
Due to current limitations in HTCondor, memory use for very
short-lived components (<5 seconds) will not be accurate.
"""
return self._state.memory_usage
@property
def runtime(self) -> List[datetime.timedelta]:
"""Return the total runtime (user + system) of each component."""
return self._state.runtime
@property
def local_data(self) -> int:
"""Return the number of bytes stored on the local disk by the map."""
# this cache is invalidated by the state reader loop when appropriate
if self._local_data is None:
logger.debug(
f"Getting map directory size for map {self.tag} (map directory is {self._map_dir})"
)
with utils.Timer() as timer:
self._local_data = utils.get_dir_size(self._map_dir, safe=False)
logger.debug(
f"Map directory size for map {self.tag} is {utils.num_bytes_to_str(self._local_data)} (took {timer.elapsed:.6f} seconds)"
)
return self._local_data
def _act(
self, action: htcondor.JobAction, requirements: Optional[str] = None,
) -> classad.ClassAd:
"""Perform an action on all of the jobs associated with this map."""
if not self.is_active:
return classad.ClassAd()
schedd = condor.get_schedd()
req = self._requirements(requirements)
a = schedd.act(action, req)
logger.debug(f'Acted on map {self.tag} (requirements = "{req}") with action {action}')
return a
def remove(self, force: bool = False) -> None:
"""
This command removes a map from the Condor queue. Functionally, this
command aborts a job.
This function will completely remove a map from the Condor
queue regardless of job state (running, executing, waiting, etc).
All data associated with a removed map is permanently deleted.
Parameters
----------
force
If ``True``, do not wait for HTCondor to finish removing the map's jobs before removing the map's local data.
## kClassification.py
## K-label classification cadres with cross-entropy loss
## NOTE: This file needs to be tested and should get an example analysis notebook.
from __future__ import division, print_function, absolute_import
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import utility as u
from itertools import product
class multilabelCadreModel(object):
def __init__(self, M=2, gamma=10., lambda_d=0.01, lambda_W=0.01,
alpha_d=0.9, alpha_W=0.9, Tmax=10000, record=100,
eta=2e-3, Nba=64, eps=1e-3, termination_metric='accuracy'):
## hyperparameters / structure
self.M = M # number of cadres
self.gamma = gamma # cadre assignment sharpness
self.lambda_d = lambda_d # regularization strengths
self.lambda_W = lambda_W
self.alpha_d = alpha_d # elastic net mixing weights
self.alpha_W = alpha_W
self.fitted = False
## optimization settings
self.Tmax = Tmax # maximum iterations
self.record = record # record points
self.eta = eta # initial stepsize
self.Nba = Nba # minibatch size
self.eps = eps # convergence tolerance
self.termination_metric = termination_metric
## parameters
self.W = 0 # regression weights
self.W0 = 0 # regression biases
self.C = 0 # cadre centers
self.d = 0 # cadre assignment weights
## data
self.data = None # pd.DataFrame containing features and response
self.cadreFts = None # pd.Index of column-names giving features used for cadre assignment
self.predictFts = None # pd.Index of column-names giving features used for target-prediction
self.targetCol = None # string column-name of response variable
## outputs
self.metrics = {'training': {'loss': [],
'accuracy': []},
'validation': {'loss': [],
'accuracy': []}}
self.time = [] # times
self.proportions = [] # cadre membership proportions during training
self.termination_reason = None # why training stopped
def get_params(self, deep=True):
return {'M': self.M, 'gamma': self.gamma, 'lambda_d': self.lambda_d,
'lambda_W': self.lambda_W, 'alpha_d': self.alpha_d,
'alpha_W': self.alpha_W, 'Tmax': self.Tmax, 'record': self.record,
'eta': self.eta, 'Nba': self.Nba, 'eps': self.eps}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
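# --- usage sketch (added comment; file/column names are hypothetical) ---
# The scikit-learn style get_params/set_params above make the model easy to tune:
#     model = multilabelCadreModel(M=3, lambda_W=0.05, Tmax=5000)
#     model.set_params(eta=1e-3)
#     model.fit(train_df, targetCol='label', dataVa=val_df, progress=True)
# where train_df/val_df are pandas DataFrames containing the cadre features, the
# prediction features and a 'label' target column.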
def fit(self, data, targetCol, cadreFts=None, predictFts=None, dataVa=None,
seed=16162, store=False, progress=False):
"""Fits multilabel classification cadre model."""
np.random.seed(seed)
## store categories of column names
self.targetCol = targetCol
if cadreFts is not None:
self.cadreFts = cadreFts
else:
self.cadreFts = data.drop(targetCol, axis=1).columns
if predictFts is not None:
self.predictFts = predictFts
else:
self.predictFts = data.drop(targetCol, axis=1).columns
## get dataset attributes
self.fitted = True
if store:
self.data = data
Pcadre, Ppredict, Ntr = self.cadreFts.shape[0], self.predictFts.shape[0], data.shape[0]
## split data into separate numpy arrays for faster access
## features for cadre-assignment
dataCadre = data.loc[:,self.cadreFts].values
## features for target-prediction
dataPredict = data.loc[:,self.predictFts].values
## target feature
dataTarget = data.loc[:,[self.targetCol]].values
K = np.unique(dataTarget).shape[0]
if dataVa is not None:
dataCadreVa = dataVa.loc[:,self.cadreFts].values
dataPredictVa = dataVa.loc[:,self.predictFts].values
dataTargetVa = dataVa.loc[:,[self.targetCol]].values
############################################
## tensorflow parameters and placeholders ##
############################################
tf.reset_default_graph()
## cadre centers parameter
C = tf.Variable(np.random.normal(loc=0., scale=0.1, size=(Pcadre,self.M)),
dtype=tf.float32, name='C')
## cadre determination weights parameter
d = tf.Variable(np.random.uniform(size=(Pcadre)), dtype=tf.float32, name='d')
## regression hyperplane weights parameter
W = tf.Variable(np.random.normal(loc=0., scale=0.1, size=(K,Ppredict,self.M)),
dtype=tf.float32, name='W')
## regression hyperplane bias parameter
W0 = tf.Variable(tf.zeros(shape=(K,self.M), dtype=tf.float32),
dtype=tf.float32, name='W0')
Xcadre = tf.placeholder(dtype=tf.float32, shape=(None,Pcadre), name='Xcadre')
Xpredict = tf.placeholder(dtype=tf.float32, shape=(None,Ppredict), name='Xpredict')
Y = tf.placeholder(dtype=tf.int32, shape=(None, ), name='Y')
eta = tf.placeholder(dtype=tf.float32, shape=(), name='eta')
lambda_Ws = tf.placeholder(dtype=tf.float32, shape=(self.M,), name='lambda_Ws')
## T[n,m] = ||x^n - c^m||^2_D
T = tf.einsum('npm,p->nm',
tf.square(tf.map_fn(lambda x: tf.expand_dims(x,1) - C, Xcadre)),
d)
## G[n,m] = g_m(x^n)
## = 1 / sum_m' exp(gamma(T[n,m] - T[n,m']))
G = 1 / tf.map_fn(lambda t:
tf.reduce_sum(tf.exp(self.gamma*(tf.expand_dims(t,1) -
tf.expand_dims(t,0))), axis=1), T, name='G')
bstCd = tf.argmax(G, axis=1, name='bestCadre')
## E[n,y,m] = e^m_y(x^n)
E = tf.add(tf.einsum('np,kpm->nkm', Xpredict, W), W0, name='E')
## F[n,k] = f_k(x^n)
F = tf.einsum('nm,nkm->nk', G, E, name='F')
Yhat = tf.argmax(F, axis=1)
## observation-wise error terms (based on jensen's inequality)
error_terms = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=F)
loss_score = tf.reduce_mean(error_terms)
## regularization
l2_d = self.lambda_d * (1 - self.alpha_d) * tf.reduce_sum(d**2)
l2_W = self.lambda_W * (1 - self.alpha_W) * tf.reduce_sum(lambda_Ws * W**2)
l1_d = self.lambda_d * self.alpha_d * tf.reduce_sum(tf.abs(d))
l1_W = self.lambda_W * self.alpha_W * tf.reduce_sum(lambda_Ws * tf.abs(W))
l2_C = 1e-7 * tf.reduce_sum(C**2)
## loss that is fed into optimizer
loss_opt = loss_score + l2_d + l2_W + l2_C
## full loss, including l1 terms handled with proximal gradient
loss_full = loss_opt + l1_d + l1_W
optimizer = tf.train.AdamOptimizer(learning_rate=eta).minimize(loss_opt)
## nonsmooth proximal terms
thresh_W = tf.assign(W, tf.sign(W) * (tf.abs(W) - eta * self.lambda_W * lambda_Ws * self.alpha_W) * tf.cast(tf.abs(W) > eta * self.lambda_W * self.alpha_W, tf.float32))
thresh_d = tf.assign(d, tf.maximum(0., tf.sign(d) * (tf.abs(d) - eta * self.lambda_d * self.alpha_d) * tf.cast(tf.abs(d) > eta * self.lambda_d * self.alpha_d, tf.float32)))
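# (added comment) The two assigns above are soft-thresholding, the proximal operator of
# the l1 penalty: prox(x) = sign(x) * max(|x| - t, 0) with t = eta * lambda * alpha
# (for W the shrink amount is additionally scaled per cadre by lambda_Ws, and d is
# clamped to be nonnegative). E.g. with t = 0.1 a weight of 0.25 shrinks to 0.15 and a
# weight of 0.05 is zeroed, which is what makes d and W sparse.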
####################
## learning model ##
####################
with tf.Session() as sess:
tf.global_variables_initializer().run()
if progress:
if dataVa is not None:
print('numbers being printed:',
'SGD iteration, training loss, training accuracy, validation loss, validation accuracy, time')
else:
print('numbers being printed:',
'SGD iteration, training loss, training accuracy, time')
t0 = time.time()
## perform optimization
for t in range(self.Tmax):
inds = np.random.choice(Ntr, self.Nba, replace=False)
## calculate adaptive regularization parameter
cadres = bstCd.eval(feed_dict={Xcadre: dataCadre[inds,:], Xpredict: dataPredict[inds,:]})
cadre_counts = np.zeros(self.M)
for m in range(self.M):
cadre_counts[m] = np.sum(cadres == m) + 1
cadre_counts = cadre_counts.sum() / cadre_counts
## take SGD step
sess.run(optimizer, feed_dict={Xcadre: dataCadre[inds,:],
Xpredict: dataPredict[inds,:],
Y: dataTarget[inds, 0],
lambda_Ws: cadre_counts,
eta: self.eta / np.sqrt(t+1)})
## take proximal gradient step
sess.run([thresh_d, thresh_W], feed_dict={eta: self.eta / np.sqrt(t+1), lambda_Ws: cadre_counts})
# record-keeping
if not t % self.record:
if progress:
if len(self.time) and dataVa is not None:
print(t,
self.metrics['training']['loss'][-1],
self.metrics['training']['accuracy'][-1],
self.metrics['validation']['loss'][-1],
self.metrics['validation']['accuracy'][-1],
self.time[-1])
elif len(self.time):
print(t,
self.metrics['training']['loss'][-1],
self.metrics['training']['accuracy'][-1],
self.time[-1])
else:
print(t)
self.time.append(time.time() - t0)
## calculate metrics -- this should be its own function since it gets repeated
cadres = bstCd.eval(feed_dict={Xcadre: dataCadre, Xpredict: dataPredict})
cadre_counts = np.zeros(self.M)
for m in range(self.M):
cadre_counts[m] = np.sum(cadres == m) + 1
cadre_counts = cadre_counts / cadre_counts.sum()
l, yhat = sess.run([loss_full, Yhat], feed_dict={Xcadre: dataCadre, Xpredict: dataPredict, Y: dataTarget[:, 0], lambda_Ws: cadre_counts})
self.metrics['training']['loss'].append(l)
self.metrics['training']['accuracy'].append(np.mean(yhat == dataTarget[:, 0]))
self.proportions.append(pd.Series(cadres).value_counts().T)
self.proportions[-1] /= self.proportions[-1].sum()
if dataVa is not None:
cadres = bstCd.eval(feed_dict={Xcadre: dataCadreVa, Xpredict: dataPredictVa})
cadre_counts = np.zeros(self.M)
for m in range(self.M):
cadre_counts[m] = np.sum(cadres == m) + 1
cadre_counts = cadre_counts / cadre_counts.sum()
l, yhat = sess.run([loss_full, Yhat], feed_dict={Xcadre: dataCadreVa,
Xpredict: dataPredictVa,
lambda_Ws: cadre_counts,
Y: dataTargetVa[:, 0]})
self.metrics['validation']['loss'].append(l)
self.metrics['validation']['accuracy'].append(np.mean(yhat == dataTargetVa[:, 0]))
if dataVa is not None:
if len(self.time) > 1:
last_metric = self.metrics['validation'][self.termination_metric][-1]
second_last_metric = self.metrics['validation'][self.termination_metric][-2]
if np.abs(last_metric - second_last_metric) < self.eps:
self.termination_reason = 'lack of sufficient decrease in validation ' + self.termination_metric
break
else:
if len(self.time) > 1:
last_metric = self.metrics['training'][self.termination_metric][-1]
second_last_metric = self.metrics['training'][self.termination_metric][-2]
if np.abs(last_metric - second_last_metric) < self.eps:
self.termination_reason = 'lack of sufficient decrease in training ' + self.termination_metric
break
if self.termination_reason == None:
self.termination_reason = 'model took ' + str(self.Tmax) + ' SGD steps'
if progress:
print('training has terminated because: ' + str(self.termination_reason))
self.C, self.d, self.W, self.W0 = C.eval(), d.eval(), W.eval(), W0.eval()
self.C = pd.DataFrame(self.C, index=self.cadreFts)
self.d = pd.Series(self.d, index=self.cadreFts)
# keep self.W as the (K, Ppredict, M) ndarray set above; it does not fit a 2-D DataFrame
## clean up output for easier analysis
self.metrics['training'] = pd.DataFrame(self.metrics['training'])
if dataVa is not None:
self.metrics['validation'] = pd.DataFrame(self.metrics['validation'])
self.proportions = pd.concat(self.proportions, axis=1).T
return self
def predictFull(self, Xnew):
"""Returns predicted values, cadre weights, and cadre estimates for new data"""
if not self.fitted: print('warning: model not yet fit')
tf.reset_default_graph()
C = tf.Variable(self.C.values, dtype=tf.float32, name='C')
d = tf.Variable(self.d.values, dtype=tf.float32, name='d')
W = tf.Variable(self.W, dtype=tf.float32, name='W')
W0 = tf.Variable(self.W0, dtype=tf.float32, name='w0')
Xcadre = tf.placeholder(dtype=tf.float32, shape=(None,self.cadreFts.shape[0]), name='Xcadre')
Xpredict = tf.placeholder(dtype=tf.float32, shape=(None,self.predictFts.shape[0]), name='Xpredict')
Y = tf.placeholder(dtype=tf.int32, shape=(None, ), name='Y')
## T[n,m] = ||x^n - c^m||^2_D
T = tf.einsum('npm,p->nm',
tf.square(tf.map_fn(lambda x: tf.expand_dims(x,1) - C, Xcadre)),
d)
## G[n,m] = g_m(x^n)
## = 1 / sum_m' exp(gamma(T[n,m] - T[n,m']))
G = 1 / tf.map_fn(lambda t:
tf.reduce_sum(tf.exp(self.gamma*(tf.expand_dims(t,1) -
tf.expand_dims(t,0))), axis=1), T, name='G')
## E[n,y,m] = e^m_y(x^n)
E = tf.add(tf.einsum('np,ypm->nym', Xpredict, W), W0, name='E')
## F[n,y] = f_y(x^n)
F = tf.einsum('nm,nym->ny', G, E, name='F')
Yhat = tf.argmax(F, axis=1)
bstCd = tf.argmax(G, axis=1, name='bestCadre')
## observation-wise error terms (based on jensen's inequality)
error_terms = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=F)
loss_score = tf.reduce_mean(error_terms)
## regularization
l2_d = self.lambda_d * (1 - self.alpha_d) * tf.reduce_sum(d**2)
l2_W = self.lambda_W * (1 - self.alpha_W) * tf.reduce_sum(W**2)
783.0 5.580 0
784.0 5.578 0
785.0 5.578 0
786.0 5.576 0
787.0 5.552 0
788.0 5.534 0
789.0 5.534 0
790.0 5.534 0
791.0 5.539 0
792.0 5.542 0
793.0 5.531 0
794.0 5.524 0
795.0 5.520 0
796.0 5.533 0
797.0 5.562 0
798.0 5.566 0
799.0 .000 1
800.0 .000 1
801.0 .000 1
802.0 .000 1
803.0 .000 1
804.0 .000 1
805.0 .000 1
806.0 .000 1
807.0 .000 1
808.0 .000 1
809.0 .000 1
810.0 .000 1
811.0 .000 1
812.0 .000 1
813.0 .000 1
814.0 .000 1
815.0 .000 1
816.0 .000 1
817.0 .000 1
818.0 .000 1
819.0 .000 1
820.0 .000 1
821.0 .000 1
822.0 .000 1
823.0 .000 1
824.0 .000 1
825.0 .000 1
826.0 .000 1
827.0 .000 1
828.0 .000 1
829.0 .000 1
830.0 .000 1
831.0 .000 1
832.0 .000 1
833.0 .000 1
834.0 .000 1
835.0 .000 1
836.0 .000 1
837.0 .000 1
838.0 .000 1
839.0 .000 1
840.0 .000 1
841.0 .000 1
842.0 .000 1
843.0 .000 1
844.0 .000 1
845.0 .000 1
846.0 .000 1
847.0 .000 1
848.0 .000 1
849.0 .000 1
850.0 .000 1
851.0 .000 1
852.0 .000 1
853.0 .000 1
854.0 .000 1
855.0 .000 1
856.0 .000 1
857.0 .000 1
858.0 .000 1
859.0 .000 1
860.0 .000 1
861.0 .000 1
862.0 .000 1
863.0 .000 1
864.0 .000 1
865.0 .000 1
866.0 .000 1
867.0 .000 1
868.0 .000 1
869.0 .000 1
870.0 .000 1
871.0 .000 1
872.0 .000 1
873.0 .000 1
874.0 .000 1
875.0 .000 1
876.0 .000 1
877.0 .000 1
878.0 .000 1
879.0 .000 1
880.0 .000 1
881.0 .000 1
882.0 .000 1
883.0 .000 1
884.0 .000 1
885.0 .000 1
886.0 .000 1
887.0 .000 1
888.0 .000 1
889.0 .000 1
890.0 .000 1
891.0 .000 1
892.0 .000 1
893.0 .000 1
894.0 .000 1
895.0 .000 1
896.0 .000 1
897.0 .000 1
898.0 .000 1
899.0 .000 1
900.0 .000 1
901.0 .000 1
902.0 .000 1
903.0 .000 1
904.0 .000 1
905.0 .000 1
906.0 .000 1
907.0 .000 1
908.0 .000 1
909.0 .000 1
910.0 .000 1
911.0 .000 1
912.0 .000 1
913.0 .000 1
914.0 .000 1
915.0 .000 1
916.0 .000 1
917.0 .000 1
918.0 .000 1
919.0 .000 1
920.0 .000 1
921.0 .000 1
922.0 .000 1
923.0 .000 1
924.0 .000 1
925.0 .000 1
926.0 .000 1
927.0 .000 1
928.0 .000 1
929.0 .000 1
930.0 .000 1
931.0 .000 1
932.0 .000 1
933.0 .000 1
934.0 .000 1
935.0 .000 1
936.0 .000 1
937.0 .000 1
938.0 .000 1
939.0 .000 1
940.0 .000 1
941.0 .000 1
942.0 .000 1
943.0 .000 1
944.0 .000 1
945.0 .000 1
946.0 .000 1
947.0 .000 1
948.0 .000 1
949.0 .000 1
950.0 .000 1
951.0 .000 1
952.0 .000 1
953.0 .000 1
954.0 .000 1
955.0 .000 1
956.0 .000 1
957.0 .000 1
958.0 .000 1
959.0 .000 1
960.0 .000 1
961.0 .000 1
962.0 .000 1
963.0 .000 1
964.0 .000 1
965.0 .000 1
966.0 .000 1
967.0 .000 1
968.0 .000 1
969.0 .000 1
970.0 .000 1
971.0 .000 1
972.0 .000 1
973.0 .000 1
974.0 .000 1
975.0 .000 1
976.0 .000 1
977.0 .000 1
978.0 .000 1
979.0 .000 1
980.0 .000 1
981.0 .000 1
982.0 .000 1
983.0 .000 1
984.0 .000 1
985.0 .000 1
986.0 .000 1
987.0 .000 1
988.0 .000 1
989.0 .000 1
990.0 .000 1
991.0 .000 1
992.0 .000 1
993.0 .000 1
994.0 .000 1
995.0 .000 1
996.0 .000 1
997.0 .000 1
998.0 .000 1
999.0 .000 1
HH 10082287 -59.660 115.730 1996 1 25 103PFL
4.0 2.468 0
9.9 2.468 0
15.8 2.468 0
21.8 2.468 0
27.7 2.468 0
33.7 2.468 0
39.6 2.426 0
45.6 2.384 0
51.5 2.341 0
57.4 1.919 0
63.4 1.709 0
69.3 1.687 0
75.3 1.582 0
81.2 1.456 0
86.2 1.330 0
92.1 1.245 0
98.0 1.161 0
104.0 1.035 0
109.9 .888 0
115.9 .783 0
121.8 .720 0
127.7 .699 0
133.7 .699 0
139.6 .699 0
145.6 .657 0
151.5 .636 0
157.4 .699 0
163.4 .699 0
169.3 .678 0
175.3 .741 0
181.2 .825 0
186.1 1.014 0
192.1 1.224 0
198.0 1.351 0
204.0 1.372 0
209.9 1.456 0
215.8 1.561 0
221.8 1.624 0
227.7 1.645 0
233.6 1.751 0
239.6 1.814 0
245.5 1.877 0
251.5 1.877 0
257.4 1.877 0
263.3 1.898 0
269.3 1.961 0
275.2 1.961 0
281.1 1.961 0
286.1 1.983 0
292.0 2.046 0
298.0 2.046 0
303.9 2.046 0
309.8 2.046 0
315.8 2.046 0
321.7 2.046 0
327.6 2.046 0
333.6 2.109 0
339.5 2.130 0
345.4 2.130 0
351.4 2.130 0
357.3 2.130 0
363.2 2.130 0
369.2 2.130 0
375.1 2.130 0
381.0 2.130 0
386.0 2.130 0
391.9 2.130 0
397.8 2.151 0
409.7 2.215 1
421.6 2.215 1
433.4 2.215 1
445.3 2.215 1
457.1 2.215 1
469.0 2.215 1
480.9 2.215 1
491.7 2.215 1
503.6 2.215 1
515.5 2.215 1
527.3 2.215 1
539.2 2.215 1
551.0 2.215 1
562.9 2.215 1
574.7 2.215 1
585.6 2.215 1
597.5 2.215 1
609.3 2.215 1
621.2 2.215 1
633.0 2.215 1
644.9 2.151 0
656.7 2.130 0
668.6 2.130 0
680.4 2.130 0
691.3 2.130 0
703.1 2.130 0
715.0 2.130 0
726.8 2.130 0
738.6 2.130 0
750.5 2.088 0
762.3 2.088 0
774.2 2.046 0
791.0 2.046 0
808.7 2.046 0
826.5 2.046 0
HH 13110218 -62.608 96.778 2008 3 20 26APB
2.0 -2.296 1
5.0 -2.296 1
14.9 -2.296 1
24.8 -2.296 1
34.7 -2.296 1
44.6 -2.296 1
54.5 -2.296 1
64.4 -2.296 1
74.3 -2.296 1
84.2 -2.296 1
94.1 -2.296 1
104.0 -2.296 1
113.9 -2.296 1
123.8 -2.296 1
133.7 -2.296 1
143.6 -2.296 1
153.4 -2.296 1
163.3 -2.296 1
173.2 -2.296 1
249.4 -2.296 1
259.3 -2.011 0
279.1 -.825 0
298.9 -.255 0
368.1 .409 0
417.5 .616 0
930.8 .272 0
HH 4373438 34.000 -65.730 1950 4 19 10MBT
.0 18.300 1
9.0 18.300 1
15.0 18.300 1
30.0 18.300 1
46.0 18.300 1
76.0 18.300 1
100.0 18.300 1
150.0 18.300 1
200.0 18.300 1
250.0 17.000 0
HH 2997813 -33.930 154.670 1969 8 1 89XBT
.0 19.390 1
5.0 19.390 1
10.0 19.390 1
15.0 19.390 1
20.0 19.390 1
25.0 19.390 1
30.0 19.390 1
35.0 19.390 1
40.0 19.390 1
45.0 19.390 1
50.0 19.390 1
55.0 19.390 1
60.0 19.390 1
65.0 19.390 1
70.0 19.390 1
75.0 19.390 1
80.0 19.390 1
85.0 19.390 1
90.0 19.390 1
95.0 19.390 1
100.0 19.390 1
105.0 19.390 1
110.0 19.390 1
115.0 19.390 1
120.0 19.390 1
125.0 19.390 1
130.0 19.390 1
135.0 19.390 1
140.0 19.390 1
145.0 19.390 1
150.0 19.390 1
155.0 19.390 1
160.0 19.390 1
165.0 19.390 1
170.0 19.390 1
175.0 19.390 1
180.0 19.390 1
185.0 19.390 1
190.0 19.390 1
195.0 19.390 1
200.0 19.390 1
205.0 19.390 1
210.0 19.390 1
215.0 19.390 1
220.0 19.390 1
225.0 19.390 1
230.0 19.390 1
235.0 19.390 1
240.0 19.390 1
245.0 19.390 1
250.0 19.390 1
255.0 19.390 1
260.0 19.390 1
265.0 19.390 1
270.0 19.390 1
275.0 19.200 0
280.0 19.000 0
285.0 19.000 0
290.0 18.890 0
295.0 18.790 0
300.0 18.700 0
305.0
].AdjointGradientJacobi( v.tVector[ i ][ 0 ], j.tVector[ i ][ 0 ], dj.tVector[ i ][ 0 ] )
jOutput.tVector[ i ][ 1 ], jOutputDash.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].AdjointGradientJacobi( v.tVector[ i ][ 1 ], j.tVector[ i ][ 1 ], dj.tVector[ i ][ 1 ] )
return jOutput, jOutputDash
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
self.pos = self.pt[ 0 ]
self.rad = self.pt[ 1 ]
self.spoke1 = self.pt[ 2 ]
self.spoke2 = self.pt[ 3 ]
##########################################################################
## Kendall 2D Shape Space ##
##########################################################################
class kendall2D_tVec( object ):
# def __init__( self ):
# self.Type = "Sphere_Tangent"
# self.nDim = 3
# self.tVector = [ 0, 0, 0 ]
def __init__( self, nPt ):
self.Type = "Kendall2D_Tangent"
self.nPt = nPt
self.nDim = nPt - 2
self.tVector = np.zeros( [ 2, nPt ] )
def GetTangentVector(self):
return self.tVector
def SetTangentVector(self, tVec):
if not tVec.shape[ 1 ] == self.nPt:
print( "Error : # of points does not match" )
return
if not tVec.shape[ 0 ] == 2:
print( "Error : Tangent vector should be 2D" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nPt ):
for j in range( 2 ):
result += self.tVector[ j, i ] * tVec1.tVector[ j, i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = kendall2D_tVec( self.nPt )
for i in range( self.nPt ):
for j in range( 2 ):
tVector_t.tVector[ j, i ] = self.tVector[ j, i ] * t
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.nPt, self.tVector, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.nPt = infoList[ 2 ]
self.tVector = infoList[ 3 ]
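# Small usage sketch added for illustration (not part of the original file): a tangent
# vector for a 4-point planar configuration; the numbers are arbitrary.
def _example_kendall2D_tVec():
    """Demonstrate kendall2D_tVec norm, scaling and inner product."""
    v = kendall2D_tVec(4)
    v.SetTangentVector(np.array([[0.1, -0.2, 0.0, 0.1],
                                 [0.0, 0.3, -0.1, -0.2]]))
    w = v.ScalarMultiply(2.0)
    # the norm scales linearly, the inner product bilinearly
    return v.norm(), w.norm(), v.InnerProduct(w)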
class kendall2D( object ):
def __init__( self, nPt ):
self.Type = "Kendall2D"
self.nPt = nPt
self.nDim = nPt - 2
pt_base = np.zeros( [ 2, nPt ] )
pt_base[ 0, 0 ] = 1
pt_base[ 0, 1 ] = 0
self.pt = pt_base
def SetPoint( self, pt ):
if not pt.shape[ 1 ] == self.nPt:
print( "Error : # of Points does not match" )
return
if not pt.shape[ 0 ] == 2:
print( "Error : Point should be 2D" )
return
if not np.linalg.norm( pt ) == 1:
# print( "Warning : The point is not on a sphere")
self.pt = np.asmatrix( pt )
return
self.pt = np.asmatrix( pt )
def GetPoint( self ):
return self.pt
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nPt ):
for j in range( 2 ):
result += self.pt[ j, i ] * ptA.pt[ j, i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ExponentialMap( self, tVec ):
theta = tVec.norm()
if theta < 1e-12:
exp_pt = kendall2D( self.nPt )
exp_pt.pt = self.pt
return exp_pt
if theta > np.pi * 2:
theta = np.mod( theta, np.pi * 2 )
exp_pt = kendall2D( self.nPt )
lhs = np.multiply( np.cos( theta ), self.pt )
rhs = np.multiply( np.sin( theta ) / theta, tVec.tVector )
exp_pt.pt = lhs + rhs
exp_pt.pt = np.divide( exp_pt.pt, exp_pt.norm() )
return exp_pt
def LogMap( self, another_pt ):
m = np.matmul( self.pt, another_pt.pt.T )
U, s, V = np.linalg.svd( m )
rotation = np.matmul( U, V.T )
qRot_pt = np.matmul( rotation, another_pt.pt )
qRot = kendall2D( self.nPt )
qRot.SetPoint( qRot_pt )
cosTheta = self.InnerProduct( qRot )
tVec = kendall2D_tVec( self.nPt )
tVec_mat = np.subtract( qRot.pt, np.multiply( cosTheta, self.pt ) )
tVec.SetTangentVector( tVec_mat )
length = tVec.norm()
if length < 1e-12 or cosTheta >= 1.0 or cosTheta <= -1.0:
tVec = kendall2D_tVec( self.nPt )
return tVec
tVec = tVec.ScalarMultiply( np.arccos( cosTheta ) / length )
return tVec
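# (added note) ExponentialMap and LogMap are inverse to each other up to the optimal
# rotation computed above, so for a preshape p and tangent vector v one expects
#     q = p.ExponentialMap(v)
#     v_back = p.LogMap(q)   # ~= v after q is rotationally aligned to p
# (p is any kendall2D point, v any kendall2D_tVec; shown only as a sanity-check idea).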
def ParallelTranslate( self, v, w ):
vNorm = v.norm()
pNorm = self.norm()
if( vNorm < 1.0e-12 or pNorm < 1.0e-12 ):
# print( "tVector too small" )
return w
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
unitJV_mat = np.matmul( skew, unitV.tVector )
unitJV = kendall2D_tVec( self.nPt )
unitJV.SetTangentVector( unitJV_mat )
unitP = self.ScalarMultiply( 1.0 / pNorm )
unitJP_mat = np.matmul( skew, unitP.pt )
unitJP = kendall2D( self.nPt )
unitJP.SetPoint( unitJP_mat )
# If v and w are horizontal, the real inner product will work
wDotUnitV = unitV.InnerProduct( w )
wDotUnitJV = unitJV.InnerProduct( w )
# Component of w orthogonal to v and jv
parallel_mat = np.add( np.multiply( wDotUnitV, unitV.tVector ), np.multiply( wDotUnitJV, unitJV_mat ) )
orth_mat = np.subtract( w.tVector, parallel_mat )
# Compute Parallel Translated V
parallelUnitV_mat = np.add( np.multiply( self.pt, -np.sin( vNorm ) / pNorm ), np.multiply( np.cos( vNorm ), unitV.tVector ) )
# Compute Parallel Translated jV
parallelUnitJV_mat = np.subtract( np.multiply( np.cos( vNorm ), unitJV_mat ), np.multiply( np.sin( vNorm ), unitJP_mat ) )
# Add parallel translated v to orth, and get parallel translated w
parallelW_paraV = np.add( np.multiply( wDotUnitV, parallelUnitV_mat ), np.multiply( wDotUnitJV, parallelUnitJV_mat ) )
parallelW_mat = np.add( parallelW_paraV, orth_mat )
wParallelTranslated = kendall2D_tVec( self.nPt )
wParallelTranslated.SetTangentVector( parallelW_mat )
return wParallelTranslated
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def ParallelTranslateToA( self, a, w ):
v = self.LogMap( a )
return self.ParallelTranslate( v, w )
def ScalarMultiply( self, t ):
p_t = kendall2D( self.nPt )
for i in range( self.nPt ):
for j in range( 2 ):
p_t.pt[ j, i ] = self.pt[ j, i ] * t
return p_t
def GradientJacobi( self, v, J, dJ ):
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
for i in range( self.nPt ):
for k in range( 2 ):
J.tVector[ k ][ i ] = J.tVector[ k ][ i ] + dJ.tVector[ k ][ i ]
return J, dJ
VdotJ = v.InnerProduct( J )
VdotJPrime = v.InnerProduct( dJ )
scaleFactorJ = VdotJ / ( vNorm * vNorm )
scaleFactorJPrime = VdotJPrime / ( vNorm * vNorm )
jTang_mat = np.multiply( v.tVector, scaleFactorJ )
jTang = kendall2D_tVec( self.nPt )
jTang.SetTangentVector( jTang_mat )
dJTang_mat = np.multiply( v.tVector, scaleFactorJPrime )
dJTang = kendall2D_tVec( self.nPt )
dJTang.SetTangentVector( dJTang_mat )
jOrth_mat = np.subtract( J.tVector, jTang_mat )
jOrth = kendall2D_tVec( self.nPt )
jOrth.SetTangentVector( jOrth_mat )
dJOrth_mat = np.subtract( dJ.tVector, dJTang_mat )
dJOrth = kendall2D_tVec( self.nPt )
dJOrth.SetTangentVector( dJOrth_mat )
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
w_mat = np.matmul( skew, unitV.tVector )
w = kendall2D_tVec( self.nPt )
w.SetTangentVector( w_mat )
# Curvature 4 component
jOrth4 = w.ScalarMultiply( w.InnerProduct( jOrth ) )
dJOrth4 = w.ScalarMultiply( w.InnerProduct( dJOrth ) )
# Curvature 1 Component
jOrth1 = kendall2D_tVec( self.nPt )
jOrth1.SetTangentVector( np.subtract( jOrth.tVector, jOrth4.tVector ) )
dJOrth1 = kendall2D_tVec( self.nPt )
dJOrth1.SetTangentVector( np.subtract( dJOrth.tVector, dJOrth4.tVector ) )
# Orthogonal Parts
jOrth.SetTangentVector( np.add( np.multiply( np.cos( vNorm ), jOrth1.tVector ), np.multiply( np.cos( 2.0 * vNorm ), jOrth4.tVector ) ) )
dJOrth.SetTangentVector( np.add( np.multiply( np.sin( vNorm ) / vNorm, dJOrth1.tVector ), np.multiply( 0.5 * np.sin( 2.0 * vNorm ) / vNorm, dJOrth4.tVector ) ) )
J_dJ_mat = jTang.tVector + dJTang.tVector + jOrth.tVector + dJOrth.tVector
J_dJ = kendall2D_tVec( self.nPt )
J_dJ.SetTangentVector( J_dJ_mat )
J = self.ParallelTranslate( v, J_dJ )
dJOrth_mat = jOrth1.ScalarMultiply( -vNorm * np.sin( vNorm ) ).tVector + jOrth4.ScalarMultiply( -2.0 * vNorm * np.sin( 2.0 * vNorm ) ).tVector
dJOrth.SetTangentVector( dJOrth_mat )
ddJOrth_mat = dJOrth1.ScalarMultiply( np.cos( vNorm ) ).tVector + dJOrth4.ScalarMultiply( np.cos( 2.0 * vNorm ) ).tVector
ddJOrth = kendall2D_tVec( self.nPt )
ddJOrth.SetTangentVector( ddJOrth_mat )
dJ_ddJ_mat = dJTang.tVector + dJOrth.tVector + ddJOrth.tVector
dJ_ddJ = kendall2D_tVec( self.nPt )
dJ_ddJ.SetTangentVector( dJ_ddJ_mat )
dJ = self.ParallelTranslate( v, dJ_ddJ )
return J, dJ
def AdjointGradientJacobi( self, v, Jac, dJac ):
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
for i in range( self.nPt ):
for j in range( 2 ):
Jac.tVector[ j ][ i ] = Jac.tVector[ j ][ i ] + dJac.tVector[ j ][ i ]
Jac_Updated = Jac
dJac_Updated = dJac
return Jac_Updated, dJac_Updated
VdotJac = v.InnerProduct( Jac )
VdotJacPrime = v.InnerProduct( dJac )
scaleFactorJac = VdotJac / ( vNorm * vNorm )
scaleFactorJacPrime = VdotJacPrime / ( vNorm * vNorm )
jTang_mat = np.multiply( v.tVector, scaleFactorJac )
jTang = kendall2D_tVec( self.nPt )
jTang.SetTangentVector( jTang_mat )
dJacTang_mat = np.multiply( v.tVector, scaleFactorJacPrime )
dJacTang = kendall2D_tVec( self.nPt )
dJacTang.SetTangentVector( dJacTang_mat )
jOrth_mat = np.subtract( Jac.tVector, jTang_mat )
jOrth = kendall2D_tVec( self.nPt )
jOrth.SetTangentVector( jOrth_mat )
dJacOrth_mat = np.subtract( dJac.tVector, dJacTang_mat )
dJacOrth = kendall2D_tVec( self.nPt )
dJacOrth.SetTangentVector( dJacOrth_mat )
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
w_mat = np.matmul( skew, unitV.tVector )
from pliers import config
from pliers.filters import FrameSamplingFilter
from pliers.extractors import (GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor,
GoogleVisionAPIWebEntitiesExtractor,
GoogleVideoIntelligenceAPIExtractor,
GoogleVideoAPILabelDetectionExtractor,
GoogleVideoAPIShotDetectionExtractor,
GoogleVideoAPIExplicitDetectionExtractor,
GoogleLanguageAPIExtractor,
GoogleLanguageAPIEntityExtractor,
GoogleLanguageAPISentimentExtractor,
GoogleLanguageAPISyntaxExtractor,
GoogleLanguageAPITextCategoryExtractor,
GoogleLanguageAPIEntitySentimentExtractor,
ExtractorResult,
merge_results)
from pliers.extractors.api.google import GoogleVisionAPIExtractor
from pliers.stimuli import ImageStim, VideoStim, TextStim
from pliers.utils import attempt_to_import, verify_dependencies
import pytest
import json
from os.path import join
from ...utils import get_test_data_path
import numpy as np
googleapiclient = attempt_to_import('googleapiclient', fromlist=['discovery'])
IMAGE_DIR = join(get_test_data_path(), 'image')
VIDEO_DIR = join(get_test_data_path(), 'video')
TEXT_DIR = join(get_test_data_path(), 'text')
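# Illustrative helper (not one of the original tests): running a single extractor outside
# of pytest follows the same pattern the tests use. It assumes the
# GOOGLE_APPLICATION_CREDENTIALS environment variable is set and the stimulus file exists.
def _example_label_extraction(image_path=join(IMAGE_DIR, 'apple.jpg')):
    """Return a wide-format dataframe of label confidences for one image."""
    ext = GoogleVisionAPILabelExtractor(num_retries=3)
    result = ext.transform(ImageStim(image_path))
    return result.to_df()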
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_inits():
ext = GoogleVisionAPIExtractor(num_retries=5)
assert ext.num_retries == 5
assert ext.max_results == 100
assert ext.service is not None
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor_inits():
ext = GoogleVisionAPIFaceExtractor(num_retries=5)
assert ext.num_retries == 5
assert ext.max_results == 100
assert ext.service is not None
# Test parsing of individual response
filename = join(
get_test_data_path(), 'payloads', 'google_vision_api_face_payload.json')
response = json.load(open(filename, 'r'))
stim = ImageStim(join(get_test_data_path(), 'image', 'obama.jpg'))
res = ExtractorResult(response['faceAnnotations'], stim, ext)
df = res.to_df()
assert df['angerLikelihood'][0] == 'VERY_UNLIKELY'
assert df['landmark_LEFT_EYE_BOTTOM_BOUNDARY_y'][0] == 257.023
assert np.isnan(df['boundingPoly_vertex2_y'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_face_extractor():
ext = GoogleVisionAPIFaceExtractor(num_retries=5)
assert ext.validate_keys()
filename = join(get_test_data_path(), 'image', 'obama.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert 'joyLikelihood' in result.columns
assert result['joyLikelihood'][0] == 'VERY_LIKELY'
assert float(result['face_detectionConfidence'][0]) > 0.7
ext = GoogleVisionAPIFaceExtractor(discovery_file='nogood')
assert not ext.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_multiple_face_extraction():
filename = join(get_test_data_path(), 'image', 'thai_people.jpg')
stim = ImageStim(filename)
# Only first record
ext = GoogleVisionAPIFaceExtractor()
result1 = ext.transform(stim).to_df(handle_annotations='first')
assert 'joyLikelihood' in result1.columns
# All records
ext = GoogleVisionAPIFaceExtractor()
result2 = ext.transform(stim).to_df()
assert 'joyLikelihood' in result2.columns
assert result2.shape[0] > result1.shape[0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_face_batch():
stims = ['apple', 'obama', 'thai_people']
stim_files = [join(get_test_data_path(), 'image', '%s.jpg' % s)
for s in stims]
stims = [ImageStim(s) for s in stim_files]
ext = GoogleVisionAPIFaceExtractor(batch_size=5)
result = ext.transform(stims)
result = merge_results(result, format='wide', extractor_names=False,
handle_annotations='first')
assert result.shape == (2, 139)
assert 'joyLikelihood' in result.columns
assert result['joyLikelihood'][0] == 'VERY_LIKELY'
assert result['joyLikelihood'][1] == 'VERY_LIKELY'
video = VideoStim(join(VIDEO_DIR, 'obama_speech.mp4'))
conv = FrameSamplingFilter(every=10)
video = conv.transform(video)
result = ext.transform(video)
result = merge_results(result, format='wide', extractor_names=False)
assert 'joyLikelihood' in result.columns
assert result.shape == (22, 139)
video = VideoStim(join(VIDEO_DIR, 'small.mp4'))
video = conv.transform(video)
result = ext.transform(video)
result = merge_results(result, format='wide', extractor_names=False)
assert 'joyLikelihood' not in result.columns
assert len(result) == 0
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_label_extractor():
ext = GoogleVisionAPILabelExtractor(num_retries=5)
assert ext.validate_keys()
filename = join(get_test_data_path(), 'image', 'apple.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert 'apple' in result.columns
assert result['apple'][0] > 0.75
url = 'https://tuition.utexas.edu/sites/all/themes/tuition/logo.png'
stim = ImageStim(url=url)
result = ext.transform(stim).to_df()
assert result['orange'][0] > 0.7
ext = GoogleVisionAPILabelExtractor(discovery_file='nogood')
assert not ext.validate_keys()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_properties_extractor():
ext = GoogleVisionAPIPropertyExtractor(num_retries=5)
filename = join(get_test_data_path(), 'image', 'apple.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert '158, 13, 29' in result.columns
assert np.isfinite(result['158, 13, 29'][0])
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_safe_search():
ext = GoogleVisionAPISafeSearchExtractor(num_retries=5)
filename = join(get_test_data_path(), 'image', 'obama.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert 'adult' in result.columns
assert result['violence'][0] == 'VERY_UNLIKELY'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_web_entities():
ext = GoogleVisionAPIWebEntitiesExtractor(num_retries=5)
filename = join(get_test_data_path(), 'image', 'obama.jpg')
stim = ImageStim(filename)
result = ext.transform(stim).to_df()
assert 'Barack Obama' in result.columns
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_vision_api_extractor_large():
default = config.get_option('allow_large_jobs')
default_large = config.get_option('large_job')
default_cache = config.get_option('cache_transformers')
config.set_option('allow_large_jobs', False)
config.set_option('large_job', 1)
config.set_option('cache_transformers', False)
ext = GoogleVisionAPILabelExtractor()
images = [ImageStim(join(IMAGE_DIR, 'apple.jpg')),
ImageStim(join(IMAGE_DIR, 'obama.jpg'))]
with pytest.raises(ValueError):
merge_results(ext.transform(images))
config.set_option('allow_large_jobs', True)
results = merge_results(ext.transform(images))
assert 'GoogleVisionAPILabelExtractor#apple' in results.columns
assert results.shape == (2, 32)
config.set_option('allow_large_jobs', default)
config.set_option('large_job', default_large)
config.set_option('cache_transformers', default_cache)
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor(caplog):
ext = GoogleVideoIntelligenceAPIExtractor(timeout=1)
stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
result = ext.transform(stim)
log_message = caplog.records[-1].message
assert log_message == ("The extraction reached the timeout limit of %fs, "
"which means the API may not have finished analyzing the "
"video and the results may be empty or incomplete." % 1.0)
ext = GoogleVideoIntelligenceAPIExtractor(timeout=500,
features=['LABEL_DETECTION',
'SHOT_CHANGE_DETECTION'])
result = ext.transform(stim).to_df()
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 500))
if not incomplete:
assert result.shape == (1, 31)
assert result['onset'][0] == 0.0
assert result['duration'][0] > 0.5 and result['duration'][0] < 0.6
assert result['category_plant'][0] > 0.5
assert result['park'][0] > 0.5
assert result['shot_id'][0] == 0
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_extractor2(caplog):
segments = [{'startTimeOffset': '0.1s', 'endTimeOffset': '0.3s'},
{'startTimeOffset': '0.3s', 'endTimeOffset': '0.45s'}]
ext = GoogleVideoIntelligenceAPIExtractor(timeout=500, segments=segments,
features=['EXPLICIT_CONTENT_DETECTION'])
stim = VideoStim(join(VIDEO_DIR, 'park.mp4'))
result = ext.transform(stim).to_df()
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 500))
if not incomplete:
assert result.shape == (2, 5)
assert result['onset'][0] > 0.1 and result['onset'][0] < 0.3
assert result['onset'][1] > 0.3 and result['onset'][1] < 0.45
assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_label_extractor(caplog):
ext = GoogleVideoAPILabelDetectionExtractor(mode='FRAME_MODE',
stationary_camera=True)
stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
ex_result = ext.transform(stim)
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 90))
if not incomplete:
result = ex_result.to_df()
assert result.shape == (7, 25)
assert 'category_toy' in result.columns
assert result['toy'][0] > 0.5
assert np.isclose(result['duration'][0], stim.duration, 0.1)
result = ex_result.to_df(format='long')
assert 'pornographyLikelihood' not in result['feature']
assert np.nan not in result['value']
ext = GoogleVideoAPILabelDetectionExtractor(mode='SHOT_MODE')
stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
ex_result = ext.transform(stim)
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 90))
if not incomplete:
raw = ex_result.raw['response']['annotationResults'][0]
assert 'shotLabelAnnotations' in raw
result = ex_result.to_df()
assert result.shape == (3, 17)
assert result['onset'][1] == 0.0
assert np.isclose(result['onset'][2], 3.2, 0.1)
assert np.isnan(result['cat'][1])
assert result['cat'][2] > 0.5
assert np.isnan(result['clock'][2])
assert result['clock'][1] > 0.5 or result['clock'][0] > 0.5
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_shot_extractor(caplog):
ext = GoogleVideoAPIShotDetectionExtractor(request_rate=3)
stim = VideoStim(join(VIDEO_DIR, 'small.mp4'))
result = ext.transform(stim).to_df()
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 90))
if not incomplete:
assert result.shape == (1, 5)
assert result['onset'][0] == 0.0
assert np.isclose(result['duration'][0], stim.duration, 0.1)
assert 'shot_id' in result.columns
assert result['shot_id'][0] == 0
ext = GoogleVideoAPIShotDetectionExtractor()
stim = VideoStim(join(VIDEO_DIR, 'shot_change.mp4'))
result = ext.transform(stim).to_df()
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 90))
if not incomplete:
assert result.shape == (2, 5)
assert np.isclose(result['onset'][1], 3.2, 0.1)
assert 'shot_id' in result.columns
assert result['shot_id'][1] == 1
@pytest.mark.long_test
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_video_api_explicit_extractor(caplog):
ext = GoogleVideoAPIExplicitDetectionExtractor(request_rate=3)
stim = VideoStim(join(VIDEO_DIR, 'small.mp4'), onset=4.2)
result = ext.transform(stim).to_df()
log_message = caplog.records[-1].message
incomplete = (log_message == ("The extraction reached the timeout limit of"
" %fs, which means the API may not have finished analyzing the"
" video and the results may be empty or incomplete." % 90))
if not incomplete:
assert result.shape[1] == 5
assert result['onset'][0] >= 4.2
assert 'pornographyLikelihood' in result.columns
assert 'UNLIKELY' in result['pornographyLikelihood'][0]
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_extractor():
verify_dependencies(['googleapiclient'])
ext = GoogleLanguageAPIExtractor(features=['classifyText',
'extractEntities'])
stim = TextStim(text='hello world')
with pytest.raises(googleapiclient.errors.HttpError):
# Should fail because too few tokens
ext.transform(stim)
stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
result = ext.transform(stim).to_df(timing=False, object_id='auto')
assert result.shape == (43, 10)
assert 'category_/Books & Literature' in result.columns
assert result['category_/Books & Literature'][0] > 0.5
irene = result[result['text'] == '<NAME>']
assert (irene['type'] == 'PERSON').all()
assert not irene['metadata_wikipedia_url'].isna().any()
# Document row shouldn't have entity features, and vice versa
assert np.isnan(result.iloc[0]['text'])
assert np.isnan(result.iloc[1]['category_/Books & Literature']).all()
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_entity_extractor():
verify_dependencies(['googleapiclient'])
ext = GoogleLanguageAPIEntityExtractor()
stim = TextStim(join(TEXT_DIR, 'sample_text_with_entities.txt'))
result = ext.transform(stim).to_df(timing=False, object_id='auto')
assert result.shape == (10, 9)
assert result['text'][0] == 'Google'
assert result['type'][0] == 'ORGANIZATION'
assert result['salience'][0] > 0.0 and result['salience'][0] < 0.5
assert result['begin_char_index'][4] == 165.0
assert result['end_char_index'][4] == 172.0
assert result['text'][4] == 'Android'
assert result['type'][4] == 'CONSUMER_GOOD'
@pytest.mark.requires_payment
@pytest.mark.skipif("'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ")
def test_google_language_api_sentiment_extractor():
verify_dependencies(['googleapiclient'])
ext = GoogleLanguageAPISentimentExtractor()
stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
result = ext.transform(stim).to_df(timing=False, object_id='auto')
assert result.shape == (12, 7)
assert 'sentiment_magnitude' in result.columns
assert 'text' in result.columns
doc_sentiment = result['sentiment_score'][11]
| |
# ufora/cumulus/test/CheckpointingTest_test.py
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import time
import unittest
import ufora.cumulus.test.InMemoryCumulusSimulation as InMemoryCumulusSimulation
import ufora.distributed.S3.InMemoryS3Interface as InMemoryS3Interface
import ufora.native.CallbackScheduler as CallbackScheduler
import ufora.native.Cumulus as CumulusNative
import ufora.native.Hash as HashNative
#import ufora.distributed.Storage.HdfsObjectStore as HdfsObjectStore
callbackScheduler = CallbackScheduler.singletonForTesting()
#note that we are being careful not to use anything from the builtins in these examples
#so that checkpointing is fast. Currently, "FORA.eval" doesn't use the module subsampler,
#so we end up holding the entire builtins if we use it.
def expensiveChildCachecalls(ix):
return """
let sumf = fun(a,b) {
if (a+1 >= b)
return [sum(a, 10**12) + %s]
let mid = (a+b)/2
return sumf(a,mid) + sumf(mid,b)
}
sumf(0,10)
""" % ix
vecOfVecCalcText = """
let sum = fun(a,b,f) {
if (a >= b) return 0
if (a+1 >= b) return f(a)
let mid = (a+b)/2;
return sum(a,mid,f) + sum(mid,b,f)
}
let res = [];
let ix = 0;
while (ix < 100) {
res = res + sum(0,100, fun(x) { [[sum(x, 10**6, fun(x){x+1})].paged].paged })
ix = ix + 1
}
res.sum()
"""
vecLoopSumText = """
let res = 0;
let ix = 0;
while (ix < 1000000) {
res = res + Vector.range(100000).sum()
ix = ix + 1
}
res
"""
repeatedVecInLoop = """
let v = Vector.range(1000000)
let ix = 0
while (ix < 1000000) {
v = v.apply({_+1}).paged
ix = ix + 1
}
v.sum()
"""
simpleSumInLoopText = """
let v = Vector.range(1000000).paged;
let sum = fun(a,b,f) {
if (a >= b) return 0
if (a+1 >= b) return f(a)
let mid = (a+b)/2;
return sum(a,mid,f) + sum(mid,b,f)
}
let res = 0;
let ix = 0
while (ix < 100) {
ix = ix + 1
res = res + sum(ix, Int64(10**9), {if (_ % 16 == 0) v[(_ * 1024) % size(v)] else _ });
}
res
"""
sumInLoopText = """
let sum = fun(a,b,f) {
if (a >= b) return 0
if (a+1 >= b) return f(a)
let mid = (a+b)/2;
return sum(a,mid,f) + sum(mid,b,f)
}
let res = 0;
let ix = 0;
while (ix < 10000) {
let bound = match(ix%3) with (0) { 1 } (1) { 10 } (2) { 100 };
res = res + sum(0,bound * 1000000, fun(x) { x + 1 })
ix = ix + 1
}
res
"""
bigSumText = """
let sum = fun(a,b,f) {
if (a >= b) return 0
if (a+1 >= b) return f(a)
let mid = (a+b)/2;
return sum(a,mid,f) + sum(mid,b,f)
}
sum(0,10**13,{_})
"""
cachedBigSumText = """
let sum = fun(a,b,f) {
if (a >= b) return 0
if (a+1 >= b) return f(a)
let mid = (a+b)/2;
return sum(a,mid,f) + sum(mid,b,f)
}
cached(sum(0,10**13,{_}))[0]
"""
TIMEOUT = 30
class CheckpointingTest(unittest.TestCase):
def waitForAllCheckpointsToClear(self, simulation, timeout = TIMEOUT):
t0 = time.time()
while time.time() < t0 + timeout:
if not simulation.getGlobalScheduler().anyOutstandingTriggeredCheckpoints():
return
time.sleep(0.01)
assert False, "timed out"
def timestampOfMostRecentFullCheckpoint(self, simulation, onlyUnfinished = True, onlySuccessful = True):
if not simulation.getGlobalScheduler():
return None
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(onlyUnfinished, True)
if len(statuses):
(computation, (checkpointStatus, checkpointRequest)) = statuses[0]
timestamp = checkpointRequest.timestamp
isFull = checkpointRequest.writeToStorage and (not onlySuccessful or checkpointStatus.checkpointSuccessful)
if isFull:
return timestamp
def timestampOfMostRecentCheckpoint(self, simulation):
if not simulation.getGlobalScheduler():
return None
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(True, False)
if len(statuses):
(computation, (checkpointStatus, checkpointRequest)) = statuses[0]
return checkpointRequest.timestamp
def timeElapsedOfMostRecentCheckpoints(self, simulation, onlyUnfinished = True, onlyCommitted = False):
if not simulation.getGlobalScheduler():
return {}
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(onlyUnfinished, onlyCommitted)
return {status[0]: status[1][0].statistics.timeElapsed.timeSpentInCompiledCode for status in statuses}
def totalTimeElapsedOfMostRecentCheckpoints(self, simulation, onlyUnfinished = True, onlyCommitted = False):
return sum(self.timeElapsedOfMostRecentCheckpoints(simulation, onlyUnfinished, onlyCommitted).values(), 0)
def waitForCheckpoint(self, simulation, priorCheckpoint = None, checkInterval = 0.1, onlyUnfinished = True):
t1 = time.time()
foundFullCheckpoint = False
while time.time() - t1 < TIMEOUT and not foundFullCheckpoint:
scheduler = simulation.getGlobalScheduler()
if scheduler:
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(onlyUnfinished, False)
if statuses:
checkpointSecondsElapsed = statuses[0][1][0].statistics.timeElapsed.timeSpentInCompiledCode
if priorCheckpoint is None or priorCheckpoint < checkpointSecondsElapsed:
foundFullCheckpoint = True
if not foundFullCheckpoint:
time.sleep(checkInterval)
if foundFullCheckpoint:
return checkpointSecondsElapsed
def waitForNFullCheckpoints(self, simulation, count, checkInterval = 0.1):
t1 = time.time()
while time.time() - t1 < TIMEOUT:
scheduler = simulation.getGlobalScheduler()
found = []
if scheduler:
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(True, True)
for (computation, (stats, checkpoint)) in statuses:
if checkpoint.writeToStorage and stats.checkpointSuccessful:
checkpointSecondsElapsed = stats.statistics.timeElapsed.timeSpentInCompiledCode
found.append(checkpointSecondsElapsed)
if len(found) < count:
time.sleep(checkInterval)
else:
return found
def waitForFullCheckpoint(self, simulation, priorCheckpoint = None, checkInterval = 0.1, onlyUnfinished = True, onlySuccessful = False):
t1 = time.time()
foundFullCheckpoint = False
while time.time() - t1 < TIMEOUT and not foundFullCheckpoint:
scheduler = simulation.getGlobalScheduler()
if scheduler:
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(onlyUnfinished, True)
if statuses:
(computation, (stats, checkpoint)) = statuses[0]
if checkpoint.writeToStorage and (not onlySuccessful or stats.checkpointSuccessful):
checkpointSecondsElapsed = stats.statistics.timeElapsed.timeSpentInCompiledCode
if priorCheckpoint is None or priorCheckpoint < checkpointSecondsElapsed:
foundFullCheckpoint = True
if not foundFullCheckpoint:
time.sleep(checkInterval)
if foundFullCheckpoint:
return checkpointSecondsElapsed
def createSimulation(self,
useHdfsObjectStore=False,
objectStore=None,
sharedStateViewFactory=None,
workerCount=4,
machineIdHashSeed=None,
s3Service=None
):
s3 = s3Service or InMemoryS3Interface.InMemoryS3InterfaceFactory()
return InMemoryCumulusSimulation.InMemoryCumulusSimulation(
workerCount,
1,
memoryPerWorkerMB=100,
threadsPerWorker=2,
s3Service=s3,
objectStore=objectStore,
sharedStateViewFactory=sharedStateViewFactory,
machineIdHashSeed=machineIdHashSeed
)
def test_checkpointingCumulusClientRequestPathway(self):
simulation = self.createSimulation()
#give the simulation a couple of seconds to pick a scheduler
t0 = time.time()
while simulation.getGlobalScheduler() is None:
time.sleep(0.01)
self.assertTrue(time.time() - t0 < 2.0)
simulation.getGlobalScheduler().setCheckpointStatusInterval(0.0001)
count = 0
try:
simulation.submitComputation(simpleSumInLoopText)
while time.time() - t0 < 2.0:
result = simulation.getCurrentCheckpointStatistics(timeout = TIMEOUT)
count = count + 1
self.assertTrue(count > 10)
print "Total roundtrips: ", count
finally:
simulation.teardown()
def test_checkpointingSystemWritesToS3(self):
simulation = self.createSimulation()
self.assertTrue(len(simulation.objectStore.listValues()) == 0)
try:
#give the simulation a couple of seconds to pick a scheduler
t0 = time.time()
while simulation.getGlobalScheduler() is None:
time.sleep(0.01)
self.assertTrue(time.time() - t0 < TIMEOUT, "never got a scheduler")
simulation.getGlobalScheduler().setCheckpointStatusInterval(0.01)
simulation.submitComputation(simpleSumInLoopText)
time.sleep(1.0)
count = 0
lastCheckpoint = None
while time.time() - t0 < 20.0:
simulation.getGlobalScheduler().triggerFullCheckpointsOnOutstandingComputations()
foundFullCheckpoint = False
t1 = time.time()
while time.time() - t1 < 10.0 and not foundFullCheckpoint:
statuses = simulation.getGlobalScheduler().currentOutstandingCheckpointStatuses(True, True)
if len(statuses):
(computation, (stats, checkpoint)) = statuses[0]
newCheckpoint = checkpoint.timestamp
if lastCheckpoint is None or newCheckpoint != lastCheckpoint:
lastCheckpoint = newCheckpoint
if checkpoint.writeToStorage and stats.checkpointSuccessful:
foundFullCheckpoint = True
time.sleep(.1)
self.assertTrue(foundFullCheckpoint)
count += 1
logging.info(
"Total: %d after %s with %d files.",
count,
time.time() - t0,
len(simulation.objectStore.listValues())
)
self.assertGreater(
len(simulation.objectStore.listValues()),
0)
guids = simulation.getWorkerVdm(0).getPersistentCacheIndex().allCheckpointedComputationGuids()
self.assertGreater(
len(guids),
0,
"Didn't write the checkpoint to the persistent cache"
)
for item in simulation.objectStore.listValues():
simulation.objectStore.deleteValue(item[0])
except:
simulation.dumpSchedulerEventStreams()
raise
finally:
simulation.teardown()
def test_checkpointingRecoverySimpleSum(self):
self.recoveryTest(simpleSumInLoopText)
def test_checkpointingRecoveryVecLoop(self):
for ix in range(3):
self.recoveryTest(
repeatedVecInLoop,
interval1 = 1.0,
interval2 = 1.0,
interval3 = 1.0,
initialWorkers = 4,
workersToDrop1 = 3,
workersToAdd1 = 3,
machineIdHashSeed=str(ix)
)
def test_s3DatasetComputationHashesAreStable(self):
for ix in range(2):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
s3().setKeyValue(
"bucketname",
"key",
"this is some data"
)
self.recoveryTest("""
let data = cached(fun() { datasets.s3("bucketname","key") }())[0];
cached(fun() { sum(0,10**12, fun(ix) { data[ix % size(data)] }) }())[0]
""",
s3Service = s3,
interval1 = 1.0,
interval2 = 1.0,
interval3 = 1.0,
interval4 = 1.0,
workersToDrop1 = 3,
workersToAdd1 = 3,
workersToDrop2 = 3,
workersToAdd2 = 3,
machineIdHashSeed=str(ix)
)
def test_recoveryWithUnreadDatasetsS3(self):
s3 = InMemoryS3Interface.InMemoryS3InterfaceFactory()
s3().setKeyValue(
"bucketname",
"key",
"this is some data"
)
simulation = self.createSimulation(s3Service = s3)
try:
#give the simulation a couple of seconds to pick a scheduler
self.assertTrue(simulation.waitForGlobalScheduler(timeout=2.0))
simulation.submitComputation("""
let data = datasets.s3("bucketname","key")
let res = sum(0,10**12)
data[res % 2]
""")
time.sleep(1.0)
simulation.getGlobalScheduler().triggerFullCheckpointsOnOutstandingComputations()
self.waitForAllCheckpointsToClear(simulation)
finally:
simulation.teardown()
def recoveryTest(self,
text,
interval1 = 5.0,
interval2 = 1.0,
interval3 = 1.0,
interval4 = 1.0,
initialWorkers = 4,
workersToDrop1 = 1,
workersToAdd1 = 0,
workersToDrop2 = 1,
workersToAdd2 = 0,
machineIdHashSeed=None,
s3Service=None
):
simulation = self.createSimulation(machineIdHashSeed=machineIdHashSeed, workerCount = initialWorkers, s3Service = s3Service)
try:
self.assertTrue(len(simulation.objectStore.listValues()) == 0)
#give the simulation a couple of seconds to pick a scheduler
self.assertTrue(simulation.waitForGlobalScheduler(timeout=2.0))
simulation.submitComputation(text)
time.sleep(interval1)
simulation.getGlobalScheduler().triggerFullCheckpointsOnOutstandingComputations()
self.waitForAllCheckpointsToClear(simulation)
checkpointSecondsElapsed = self.totalTimeElapsedOfMostRecentCheckpoints(simulation, onlyUnfinished=False, onlyCommitted=True)
time.sleep(interval2)
for _ in range(workersToDrop1):
simulation.dropTopWorker()
for _ in range(workersToAdd1):
simulation.addWorker()
self.assertTrue(simulation.waitForHandshake())
time.sleep(interval3)
simulation.getGlobalScheduler().triggerFullCheckpointsOnOutstandingComputations()
self.waitForAllCheckpointsToClear(simulation)
checkpointSecondsElapsed2 = self.totalTimeElapsedOfMostRecentCheckpoints(simulation, onlyUnfinished=False, onlyCommitted=True)
self.assertTrue(
checkpointSecondsElapsed2 | |
import re
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from helpers import auditchecker
from view_models import sidebar as sidebar_constants, clients_table_vm, members_table, \
keys_and_certificates_table as keyscertificates_constants, popups as popups, messages, \
groups_table, central_services, log_constants
from view_models.clients_table_vm import DETAILS_TAB_CSS
from view_models.log_constants import ADD_MEMBER_FAILED, EDIT_MEMBER_NAME_FAILED, GENERATE_KEY_FAILED, ADD_WSDL_FAILED, \
EDIT_MEMBER_NAME
from view_models.messages import get_error_message
def test_key_label_inputs():
def test_case(self):
parse_key_label_inputs(self)
return test_case
def test_csr_inputs():
def test_case(self):
parse_csr_inputs(self)
return test_case
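# Note (added): each test_* function in this module is a factory returning a
# `test_case` closure that expects a MainController-like `self`. How these
# closures are attached to test classes is presumably handled by the surrounding
# harness; that wiring is an inference from the pattern, not shown in this file.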
def test_ss_client_inputs():
def test_case(self):
"""
MEMBER_47 step 3 System verifies security server client input
:param self: MainController object
:return: None
"""
'''Open security server clients tab'''
self.log('Open security server clients tab')
self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar_constants.CLIENTS_BTN_CSS).click()
'''Loop through clients members and subsystems codes and expected results'''
counter = 1
for add_client_data in clients_table_vm.MEMBER_SUBSYSTEM_CODE_AND_RESULTS:
member_code = add_client_data[0]
subsystem_code = add_client_data[1]
error = add_client_data[2]
error_message = add_client_data[3]
error_message_label = add_client_data[4]
whitespaces = add_client_data[5]
self.log('TEST-{0}'.format(counter))
'''Add client'''
add_ss_client(self, member_code, subsystem_code)
'''Verify error messages'''
error_messages(self, error, error_message, error_message_label)
if error:
'''MEMBER 47/3a3a SS administrator selects to terminate the use case.'''
self.log('Click on "Cancel" button')
self.wait_until_visible(type=By.XPATH, element=popups.ADD_CLIENT_POPUP_CANCEL_BTN_XPATH).click()
else:
self.log('Click on "CONTINUE" button')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.log('Click on "CONFIRM" button')
popups.confirm_dialog_click(self)
'''MEMBER 54 2. System verifies that mandatory fields are filled.'''
self.log('''MEMBER 54 2. System verifies that mandatory fields are filled.''')
'''MEMBER 54 3. System verifies that the user input does not exceed 255 characters.'''
self.log('''MEMBER 54 3. System verifies that the user input does not exceed 255 characters.''')
self.log('Find added Member Code == "' + member_code + ', Subsystem Code == ' + subsystem_code)
self.wait_jquery()
client_id = self.wait_until_visible(type=By.XPATH, element=clients_table_vm.
get_client_id_by_member_code_subsystem_code(member_code.strip(),
subsystem_code.strip()))
client_id_text = client_id.text
self.log(client_id_text)
if whitespaces:
'''MEMBER 54 1. System removes leading and trailing whitespaces.'''
self.log('''MEMBER 54 1. System removes leading and trailing whitespaces.''')
find_text_with_whitespaces(self, member_code, client_id_text)
find_text_with_whitespaces(self, subsystem_code, client_id_text)
else:
assert member_code and subsystem_code in client_id_text
'''Delete the added client'''
delete_added_client(self, client_id)
counter += 1
self.wait_jquery()
return test_case
def test_edit_wsdl_inputs():
def test_case(self):
"""
SERVICE_09 step 3 Verifies WSDL url
:param self: MainController object
:return: None
"""
self.log('Open security server clients tab')
self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar_constants.CLIENTS_BTN_CSS).click()
member_code = clients_table_vm.ONE_SS_CLIENT[0]
subsystem_code = clients_table_vm.ONE_SS_CLIENT[1]
'''Add client'''
add_ss_client(self, member_code, subsystem_code)
self.wait_jquery()
self.log('Click on "CONTINUE" button')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.log('Click on "CONFIRM" button')
popups.confirm_dialog_click(self)
self.log('Find added Member Code == "' + member_code + ', Subsystem Code == ' + subsystem_code)
client_row = self.wait_until_visible(type=By.XPATH, element=clients_table_vm.
get_client_id_by_member_code_subsystem_code(member_code,
subsystem_code))
counter = 1
management_wsdl_url = self.config.get('wsdl.management_service_wsdl_url')
cs_host = self.config.get('cs.ssh_host')
ss_2_ssh_host = self.config.get('ss2.ssh_host')
ss_2_ssh_user = self.config.get('ss2.ssh_user')
ss_2_ssh_pass = self.config.get('ss2.ssh_pass')
self.wait_jquery()
self.log("Open client details")
client_row.find_element_by_css_selector(DETAILS_TAB_CSS).click()
add_wsdl_url(self, management_wsdl_url)
self.wait_jquery()
'''Open WSDL URL services'''
self.log('Click on added wsdl url - {0}'.format(management_wsdl_url))
self.wait_until_visible(type=By.XPATH,
element=popups.get_wsdl_url_row(management_wsdl_url)).click()
self.wait_jquery()
self.log('Click on "CLOSE" button')
self.wait_until_visible(type=By.XPATH, element=popups.CLIENT_DETAILS_POPUP_CLOSE_BTN_XPATH).click()
log_checker = auditchecker.AuditChecker(host=ss_2_ssh_host, username=ss_2_ssh_user, password=ss_2_ssh_pass)
'''Loop through wsdl url's'''
for wsdl_data in clients_table_vm.WSDL_DATA:
current_log_lines = log_checker.get_line_count()
wsdl_url = wsdl_data[0].format(management_wsdl_url, cs_host)
error = wsdl_data[1]
error_message = wsdl_data[2]
error_message_label = wsdl_data[3]
whitespaces = wsdl_data[4]
'''Generate long inputs'''
long_wsdl_url = wsdl_url.split('#')
try:
if long_wsdl_url[1] == '255':
multiplier = int(long_wsdl_url[1]) - len(long_wsdl_url[0]) - len(long_wsdl_url[2])
wsdl_url = long_wsdl_url[0] + multiplier * 'A' + long_wsdl_url[2]
elif long_wsdl_url[1] == '256':
multiplier = int(long_wsdl_url[1]) - len(long_wsdl_url[0]) - len(long_wsdl_url[2])
wsdl_url = long_wsdl_url[0] + multiplier * 'A' + long_wsdl_url[2]
except:
pass
self.log('TEST - {0}'.format(counter))
self.log("Open client details")
client_row.find_element_by_css_selector(DETAILS_TAB_CSS).click()
self.wait_jquery()
self.log("Open 'Services' tab")
self.wait_until_visible(type=By.XPATH, element=clients_table_vm.SERVICES_TAB_XPATH).click()
self.wait_jquery()
'''SERVICE 09/1 SS administrator selects to edit the URL of a WSDL.'''
self.log('Click on "Edit" button')
self.wait_until_visible(type=By.ID, element=popups.EDIT_WSDL_BUTTON_ID).click()
self.wait_jquery()
'''SERVICE 09/2 SS administrator inserts the new URL of the WSDL.'''
self.log('Enter wsdl url (string length = {0}) - {1}'.format(len(wsdl_url), wsdl_url))
url_field = self.wait_until_visible(type=By.ID, element=popups.EDIT_WSDL_POPUP_URL_ID)
self.input(url_field, wsdl_url)
self.wait_jquery()
self.log('Click on "OK" button')
self.wait_until_visible(type=By.XPATH, element=popups.EDIT_WSDL_POPUP_OK_BTN_XPATH).click()
'''SERVICE 09/3 System parses the user input:'''
'''Verify error messages'''
error_messages(self, error, error_message, error_message_label)
self.wait_jquery()
if error:
'''SERVICE 09/3a3a SS administrator selects to terminate the use case.'''
logs_found = log_checker.check_log(log_constants.EDIT_WSDL_FAILED, from_line=current_log_lines + 1)
self.is_true(logs_found, msg="Edit wsdl failed not found in audit log")
self.log('Click on "Cancel" button')
self.wait_until_visible(type=By.XPATH, element=popups.EDIT_WSDL_POPUP_CANCEL_BTN_XPATH).click()
else:
'''SERVICE 11 2. System verifies that mandatory fields are filled.'''
self.log('''SERVICE 11 2. System verifies that mandatory fields are filled.''')
'''SERVICE 11 3. System verifies that the user input does not exceed 255 characters.'''
self.log('''SERVICE 11 3. System verifies that the user input does not exceed 255 characters.''')
self.log('Find added WSDL URL row number - ' + wsdl_url)
found_wsdl_url = self.wait_until_visible(type=By.CSS_SELECTOR,
element=popups.CLIENT_DETAILS_POPUP_WSDL_CSS)
found_wsdl_url = found_wsdl_url.text
if whitespaces:
'''SERVICE 11 1. System removes leading and trailing whitespaces.'''
self.log('''SERVICE 11 1. System removes leading and trailing whitespaces.''')
find_text_with_whitespaces(self, wsdl_url, found_wsdl_url)
else:
assert wsdl_url in found_wsdl_url
self.log('Found WSDL URL - ' + found_wsdl_url)
'''Close details window'''
self.log('Click on "CLOSE" button')
self.wait_until_visible(type=By.XPATH, element=popups.CLIENT_DETAILS_POPUP_CLOSE_BTN_XPATH).click()
counter += 1
'''Delete added client'''
client_row = self.wait_until_visible(type=By.XPATH, element=clients_table_vm.
get_client_id_by_member_code_subsystem_code(member_code,
subsystem_code))
delete_added_client(self, client_row)
return test_case
def test_disable_wsdl_inputs():
def test_case(self):
"""
SERVICE_13 step 4 Verifies WSDL url
:param self: MainController object
:return: None
"""
self.log('Open security server clients tab')
self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar_constants.CLIENTS_BTN_CSS).click()
member_code = clients_table_vm.ONE_SS_CLIENT[0]
subsystem_code = clients_table_vm.ONE_SS_CLIENT[1]
'''Add client'''
add_ss_client(self, member_code, subsystem_code)
self.log('Click on "CONTINUE" button')
self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()
self.log('Click on "CONFIRM" button')
popups.confirm_dialog_click(self)
self.log('Find added Member Code == "' + member_code + ', Subsystem Code == ' + subsystem_code)
client_row = self.wait_until_visible(type=By.XPATH, element=clients_table_vm.
get_client_id_by_member_code_subsystem_code(member_code,
subsystem_code))
self.wait_jquery()
'''Add wsdl url'''
self.log("Open client details")
client_row.find_element_by_css_selector(DETAILS_TAB_CSS).click()
add_wsdl_url(self, self.config.get('wsdl.management_service_wsdl_url'))
self.log('Click on WSDL url row')
self.wait_until_visible(type=By.CSS_SELECTOR, element=popups.CLIENT_DETAILS_POPUP_WSDL_CSS).click()
wsdl_disabled = True
counter = 1
ss_2_ssh_host = self.config.get('ss2.ssh_host')
ss_2_ssh_user = self.config.get('ss2.ssh_user')
ss_2_ssh_pass = self.config.get('ss2.ssh_pass')
log_checker = auditchecker.AuditChecker(host=ss_2_ssh_host, username=ss_2_ssh_user, password=ss_2_ssh_pass)
'''Loop through inputs and expected results'''
for wsdl_disable_notice in clients_table_vm.WSDL_DISABLE_NOTICES:
current_log_lines = log_checker.get_line_count()
notice = wsdl_disable_notice[0]
error = wsdl_disable_notice[1]
error_message = wsdl_disable_notice[2]
error_message_label = wsdl_disable_notice[3]
self.log('TEST - {0}. Notice == "{1}"'.format(counter, notice))
if wsdl_disabled:
self.log('Click on "ENABLE" button')
self.wait_until_visible(type=By.ID,
element=popups.CLIENT_DETAILS_POPUP_ENABLE_WSDL_BTN_ID).click()
'''SERVICE_13/1 SS administrator selects to disable a WSDL.'''
self.log('Click on "DISABLE" button')
self.wait_until_visible(type=By.ID,
element=popups.CLIENT_DETAILS_POPUP_DISABLE_WSDL_BTN_ID).click()
'''SERVICE_13/2 System asks for notice message that will be sent as a response to service clients trying
to access services described in the WSDL'''
'''SERVICE_13/3 SS administrator inserts the message.'''
self.log('Add notice (string length = {0})- "{1}"'.format(len(notice), notice))
notice_field = self.wait_until_visible(type=By.ID,
element=popups.DISABLE_WSDL_POPUP_NOTICE_ID)
self.input(notice_field, notice)
self.log('Click on "OK" button')
self.wait_until_visible(type=By.XPATH,
element=popups.DISABLE_WSDL_POPUP_OK_BTN_XPATH).click()
'''SERVICE 13/4 System parses the user input:'''
'''Verify error messages'''
error_messages(self, error, error_message, error_message_label)
if error:
self.log('SERVICE_13 4a2 audit log contains disable wsdl failed when disabling fails')
logs_found = log_checker.check_log(log_constants.DISABLE_WSDL_FAILED, from_line=current_log_lines + 1)
self.is_true(logs_found, msg="Disable wsdl failed not found in audit log")
'''SERVICE 13/4a.3a SS administrator selects to terminate the use case.'''
self.log('Click on "CANCEL" button')
self.wait_until_visible(type=By.XPATH,
element=popups.DISABLE_WSDL_POPUP_CANCEL_BTN_XPATH).click()
wsdl_disabled = False
else:
wsdl_disabled = True
self.wait_jquery()
self.log('Click on "CLOSE" button')
self.wait_until_visible(type=By.XPATH, element=popups.CLIENT_DETAILS_POPUP_CLOSE_BTN_XPATH).click()
self.log('Delete added client')
delete_added_client(self, client_row)
counter += 1
return test_case
def edit_service(self, service_url, service_timeout=None, verify_tls=None):
'''
Enters a new service URL (and optionally a timeout and TLS verification setting)
into the "Edit Service Parameters" dialog and presses "OK".
:param self: MainController object
:param service_url: str - new URL for the service
:param service_timeout: service timeout to set; left unchanged if None
:param verify_tls: bool - desired state of the "Verify TLS" checkbox; left unchanged if None
:return:
'''
self.log('Setting new service URL with timeout {0}: {1}'.format(service_timeout, service_url))
# Find the "Edit Service Parameters" dialog. Because this function can be called from a state where the dialog is open and
# a state where it is not, we'll first check if the dialog is open. If it is not, we'll click the "Edit"
# button to open it.
wsdl_dialog = self.by_xpath(popups.EDIT_SERVICE_POPUP_XPATH)
# Open the dialog if it is not already open
if not wsdl_dialog.is_displayed():
# Find "Edit" button and click it.
edit_wsdl_button = self.by_id(popups.CLIENT_DETAILS_POPUP_EDIT_WSDL_BTN_ID)
edit_wsdl_button.click()
# Find the dialog and wait until it is visible.
self.wait_until_visible(wsdl_dialog)
# Now an "Edit Service Parameters" dialog with a URL prompt should be open. Let's try to set the service URL.
# Find the URL input element
service_url_input = self.by_id(popups.EDIT_SERVICE_POPUP_URL_ID)
service_timeout_input = self.by_id(popups.EDIT_SERVICE_POPUP_TIMEOUT_ID)
# Enter the service URL.
self.input(service_url_input, service_url)
# Set service timeout if specified
if service_timeout is not None:
self.input(service_timeout_input, service_timeout)
# Set "Verify TLS" if specified
if verify_tls is not None:
service_tls_checkbox = self.wait_until_visible(popups.EDIT_SERVICE_POPUP_TLS_ENABLED_XPATH, By.XPATH)
checked = service_tls_checkbox.get_attribute('checked')
if ((checked != '' and checked is not None) and not verify_tls) or (checked is None and verify_tls):
service_tls_checkbox.click()
# Find the "OK" button in "Edit WSDL Parameters" dialog
wsdl_dialog_ok_button = self.by_xpath(popups.EDIT_SERVICE_POPUP_OK_BTN_XPATH)
wsdl_dialog_ok_button.click()
# Clicking the button starts an | |
# -*- coding: utf-8 -*-
# Copyright 2020 The TensorFlowTTS Team and <NAME> (@kan-bayashi)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parallel-wavegan Modules. Based on pytorch implementation (https://github.com/kan-bayashi/ParallelWaveGAN)"""
import tensorflow as tf
def get_initializer(initializer_seed=42):
"""Creates a `tf.initializers.he_normal` with the given seed.
Args:
initializer_seed: int, initializer seed.
Returns:
HeNormal initializer with seed = `initializer_seed`.
"""
return tf.keras.initializers.HeNormal(seed=initializer_seed)
class TFConv1d1x1(tf.keras.layers.Conv1D):
"""1x1 Conv1d with customized initialization."""
def __init__(self, filters, use_bias, padding, initializer_seed, **kwargs):
"""Initialize 1x1 Conv1d module."""
super().__init__(
filters=filters,
kernel_size=1,
strides=1,
padding=padding,
dilation_rate=1,
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed),
**kwargs,
)
class TFConv1d(tf.keras.layers.Conv1D):
"""Conv1d with customized initialization."""
def __init__(self, *args, **kwargs):
"""Initialize Conv1d module."""
initializer_seed = kwargs.pop("initializer_seed", 42)
super().__init__(
*args, **kwargs, kernel_initializer=get_initializer(initializer_seed)
)
class TFResidualBlock(tf.keras.layers.Layer):
"""Residual block module in WaveNet."""
def __init__(
self,
kernel_size=3,
residual_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=80,
dropout_rate=0.0,
dilation_rate=1,
use_bias=True,
use_causal_conv=False,
initializer_seed=42,
**kwargs,
):
"""Initialize ResidualBlock module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
residual_channels (int): Number of channels for residual connection.
gate_channels (int): Number of channels for the gated activation (split into two halves).
skip_channels (int): Number of channels for skip connection.
aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
dropout_rate (float): Dropout probability.
dilation_rate (int): Dilation factor.
use_bias (bool): Whether to add bias parameter in convolution layers.
use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution.
initializer_seed (int32): initializer seed.
"""
super().__init__(**kwargs)
self.dropout_rate = dropout_rate
# no future time stamps available
self.use_causal_conv = use_causal_conv
# dilation conv
self.conv = TFConv1d(
filters=gate_channels,
kernel_size=kernel_size,
padding="same" if self.use_causal_conv is False else "causal",
strides=1,
dilation_rate=dilation_rate,
use_bias=use_bias,
initializer_seed=initializer_seed,
)
# local conditioning
if aux_channels > 0:
self.conv1x1_aux = TFConv1d1x1(
gate_channels,
use_bias=False,
padding="same",
initializer_seed=initializer_seed,
name="conv1x1_aux",
)
else:
self.conv1x1_aux = None
# conv output is split into two groups
gate_out_channels = gate_channels // 2
self.conv1x1_out = TFConv1d1x1(
residual_channels,
use_bias=use_bias,
padding="same",
initializer_seed=initializer_seed,
name="conv1x1_out",
)
self.conv1x1_skip = TFConv1d1x1(
skip_channels,
use_bias=use_bias,
padding="same",
initializer_seed=initializer_seed,
name="conv1x1_skip",
)
self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)
def call(self, x, c, training=False):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, residual_channels, T).
c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).
Returns:
Tensor: Output tensor for residual connection (B, T, residual_channels).
Tensor: Output tensor for skip connection (B, T, skip_channels).
"""
residual = x
x = self.dropout(x, training=training)
x = self.conv(x)
# split into two part for gated activation
xa, xb = tf.split(x, 2, axis=-1)
# local conditioning
if c is not None:
assert self.conv1x1_aux is not None
c = self.conv1x1_aux(c)
ca, cb = tf.split(c, 2, axis=-1)
xa, xb = xa + ca, xb + cb
x = tf.nn.tanh(xa) * tf.nn.sigmoid(xb)
# for skip connection
s = self.conv1x1_skip(x)
# for residual connection
x = self.conv1x1_out(x)
x = (x + residual) * tf.math.sqrt(0.5)
return x, s
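# Illustrative sketch (added, not part of the original module): exercising one
# residual block on dummy tensors. Batch size and length are arbitrary
# assumptions; the channel sizes follow the defaults declared above.
def _demo_residual_block():
    block = TFResidualBlock()                 # 64 residual / 128 gate / 64 skip / 80 aux channels
    x = tf.random.normal([2, 100, 64])        # (B, T, residual_channels)
    c = tf.random.normal([2, 100, 80])        # (B, T, aux_channels)
    x_out, skip = block(x, c)                 # gated activation, then residual and skip paths
    return x_out.shape, skip.shape            # both (2, 100, 64)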
class TFStretch1d(tf.keras.layers.Layer):
"""Stretch2d module."""
def __init__(self, x_scale, y_scale, method="nearest", **kwargs):
"""Initialize Stretch2d module.
Args:
x_scale (int): X scaling factor (Time axis in spectrogram).
y_scale (int): Y scaling factor (Frequency axis in spectrogram).
method (str): Interpolation method.
"""
super().__init__(**kwargs)
self.x_scale = x_scale
self.y_scale = y_scale
self.method = method
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C, 1).
Returns:
Tensor: Interpolated tensor (B, T * x_scale, C * y_scale, 1)
"""
x_shape = tf.shape(x)
new_size = (x_shape[1] * self.x_scale, x_shape[2] * self.y_scale)
x = tf.image.resize(x, method=self.method, size=new_size)
return x
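# Illustrative sketch (added): TFStretch1d only repeats/interpolates frames along
# the time axis when y_scale is 1; the scale factor below is an arbitrary choice
# for demonstration.
def _demo_stretch():
    stretch = TFStretch1d(x_scale=4, y_scale=1, method="nearest")
    c = tf.random.normal([2, 10, 80, 1])      # (B, T, C, 1)
    return stretch(c).shape                   # (2, 40, 80, 1): time axis scaled by 4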
class TFUpsampleNetWork(tf.keras.layers.Layer):
"""Upsampling network module."""
def __init__(
self,
output_channels,
upsample_scales,
nonlinear_activation=None,
nonlinear_activation_params={},
interpolate_mode="nearest",
freq_axis_kernel_size=1,
use_causal_conv=False,
**kwargs,
):
"""Initialize upsampling network module.
Args:
output_channels (int): output feature channels.
upsample_scales (list): List of upsampling scales.
nonlinear_activation (str): Activation function name.
nonlinear_activation_params (dict): Arguments for specified activation function.
interpolate_mode (str): Interpolation mode.
freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
"""
super().__init__(**kwargs)
self.use_causal_conv = use_causal_conv
self.up_layers = []
for scale in upsample_scales:
# interpolation layer
stretch = TFStretch1d(
scale, 1, interpolate_mode, name="stretch_._{}".format(scale)
) # ->> outputs: [B, T * scale, C * 1, 1]
self.up_layers += [stretch]
# conv layer
assert (
freq_axis_kernel_size - 1
) % 2 == 0, "Not support even number freq axis kernel size."
kernel_size = scale * 2 + 1
conv = tf.keras.layers.Conv2D(
filters=1,
kernel_size=(kernel_size, freq_axis_kernel_size),
padding="causal" if self.use_causal_conv is True else "same",
use_bias=False,
) # ->> outputs: [B, T * scale, C * 1, 1]
self.up_layers += [conv]
# nonlinear
if nonlinear_activation is not None:
nonlinear = getattr(tf.keras.layers, nonlinear_activation)(
**nonlinear_activation_params
)
self.up_layers += [nonlinear]
def call(self, c):
"""Calculate forward propagation.
Args:
c : Input tensor (B, T, C).
Returns:
Tensor: Upsampled tensor (B, T', C), where T' = T * prod(upsample_scales).
"""
c = tf.expand_dims(c, -1) # [B, T, C, 1]
for f in self.up_layers:
c = f(c)
return tf.squeeze(c, -1) # [B, T, C]
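# Illustrative sketch (added): the overall upsampling factor is the product of
# `upsample_scales`, e.g. [4, 4, 4, 4] -> 256, typically chosen to match the
# feature-frame hop size (an assumption, not a value taken from any config here).
def _demo_upsample_network():
    net = TFUpsampleNetWork(output_channels=80, upsample_scales=[4, 4, 4, 4])
    c = tf.random.normal([2, 20, 80])         # (B, T', C) mel-like features
    return net(c).shape                       # (2, 20 * 256, 80)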
class TFConvInUpsampleNetWork(tf.keras.layers.Layer):
"""Convolution + upsampling network module."""
def __init__(
self,
upsample_scales,
nonlinear_activation=None,
nonlinear_activation_params={},
interpolate_mode="nearest",
freq_axis_kernel_size=1,
aux_channels=80,
aux_context_window=0,
use_causal_conv=False,
initializer_seed=42,
**kwargs,
):
"""Initialize convolution + upsampling network module.
Args:
upsample_scales (list): List of upsampling scales.
nonlinear_activation (str): Activation function name.
nonlinear_activation_params (dict): Arguments for specified activation function.
interpolate_mode (str): Interpolation mode.
freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
aux_channels (int): Number of channels of pre-convolutional layer.
aux_context_window (int): Context window size of the pre-convolutional layer.
use_causal_conv (bool): Whether to use causal structure.
"""
super().__init__(**kwargs)
self.aux_context_window = aux_context_window
self.use_causal_conv = use_causal_conv and aux_context_window > 0
# To capture wide-context information in conditional features
kernel_size = (
aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
)
self.conv_in = TFConv1d(
filters=aux_channels,
kernel_size=kernel_size,
padding="same",
use_bias=False,
initializer_seed=initializer_seed,
name="conv_in",
)
self.upsample = TFUpsampleNetWork(
output_channels=aux_channels,
upsample_scales=upsample_scales,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
interpolate_mode=interpolate_mode,
freq_axis_kernel_size=freq_axis_kernel_size,
use_causal_conv=use_causal_conv,
name="upsample_network",
)
def call(self, c):
"""Calculate forward propagation.
Args:
c : Input tensor (B, T', C).
Returns:
Tensor: Upsampled tensor (B, T, C),
where T = (T' - aux_context_window * 2) * prod(upsample_scales).
Note:
The length of inputs considers the context window size.
"""
c_ = self.conv_in(c)
return self.upsample(c_)
class TFParallelWaveGANGenerator(tf.keras.Model):
"""Parallel WaveGAN Generator module."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.out_channels = config.out_channels
self.aux_channels = config.aux_channels
self.n_layers = config.n_layers
self.stacks = config.stacks
self.kernel_size = config.kernel_size
self.upsample_params = config.upsample_params
# check the number of layers and stacks
assert self.n_layers % self.stacks == 0
n_layers_per_stack = self.n_layers // self.stacks
# define first convolution
self.first_conv = TFConv1d1x1(
filters=config.residual_channels,
use_bias=True,
padding="same",
initializer_seed=config.initializer_seed,
name="first_convolution",
)
# define conv + upsampling network
if config.upsample_conditional_features:
self.upsample_params.update({"use_causal_conv": config.use_causal_conv})
self.upsample_params.update(
{
"aux_channels": config.aux_channels,
"aux_context_window": config.aux_context_window,
}
)
self.upsample_net = TFConvInUpsampleNetWork(**self.upsample_params)
else:
self.upsample_net = None
# define residual blocks
self.conv_layers = []
for layer in range(self.n_layers):
dilation_rate = 2 ** (layer % n_layers_per_stack)
conv = TFResidualBlock(
kernel_size=config.kernel_size,
residual_channels=config.residual_channels,
gate_channels=config.gate_channels,
skip_channels=config.skip_channels,
aux_channels=config.aux_channels,
dilation_rate=dilation_rate,
dropout_rate=config.dropout_rate,
use_bias=config.use_bias,
use_causal_conv=config.use_causal_conv,
initializer_seed=config.initializer_seed,
name="residual_block_._{}".format(layer),
)
self.conv_layers += [conv]
# define output layers
self.last_conv_layers = [
tf.keras.layers.ReLU(),
TFConv1d1x1(
filters=config.skip_channels,
use_bias=config.use_bias,
padding="same",
initializer_seed=config.initializer_seed,
),
tf.keras.layers.ReLU(),
TFConv1d1x1(
filters=config.out_channels,
use_bias=True,
padding="same",
initializer_seed=config.initializer_seed,
),
tf.keras.layers.Activation("tanh"),
]
def _build(self):
mels = tf.random.uniform(shape=[2, 20, 80], dtype=tf.float32)
self(mels, training=tf.cast(True, tf.bool))
def call(self, mels, training=False, **kwargs):
"""Calculate forward propagation.
Args:
mels (Tensor): Local conditioning auxiliary features (B, T', C).
Returns:
Tensor: Output tensor (B, T, 1)
"""
# perform upsampling
if mels is not None and self.upsample_net is not None:
c = self.upsample_net(mels)
# random noise x
# encode to hidden representation
x = tf.expand_dims(tf.random.normal(shape=tf.shape(c)[0:2]), axis=2)
x = self.first_conv(x)
skips = 0
for f in self.conv_layers:
x, h = f(x, c, training=training)
skips += h
skips *= tf.math.sqrt(1.0 / len(self.conv_layers))
# apply final layers
x = skips
for f in self.last_conv_layers:
x = f(x)
return x
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels"),
],
)
def inference(self, mels):
"""Calculate forward propagation.
Args:
c (Tensor): Local conditioning auxiliary features (B, T', C).
Returns:
Tensor: Output tensor (B, T, 1)
"""
# perform upsampling
if mels is not None and self.upsample_net is not None:
c = self.upsample_net(mels)
# encode to hidden | |
is None:
return u.one
return u.Unit(unit)
@property
def wavelength(self):
"""Wavelength of the observation."""
return u.Quantity(self.meta.get('wavelnth', 0),
self.waveunit)
@property
def observatory(self):
"""Observatory or Telescope name."""
return self.meta.get('obsrvtry',
self.meta.get('telescop', "")).replace("_", " ")
@property
def processing_level(self):
"""
Returns the FITS processing level if present.
"""
return self.meta.get('lvl_num', None)
@property
def bottom_left_coord(self):
"""
The physical coordinate at the center of the bottom left ([0, 0]) pixel.
"""
return self.pixel_to_world(0*u.pix, 0*u.pix)
@property
def top_right_coord(self):
"""
The physical coordinate at the center of the top right ([-1, -1]) pixel.
"""
top_right = u.Quantity(self.dimensions) - 1 * u.pix
return self.pixel_to_world(*top_right)
@property
def center(self):
"""
Return a coordinate object for the center pixel of the array.
If the array has an even number of pixels in a given dimension,
the coordinate returned lies on the edge between the two central pixels.
"""
center = (u.Quantity(self.dimensions) - 1 * u.pix) / 2.
return self.pixel_to_world(*center)
@property
def shifted_value(self):
"""The total shift applied to the reference coordinate by past applications of
`~sunpy.map.GenericMap.shift`."""
return self._shift
@u.quantity_input
def shift(self, axis1: u.deg, axis2: u.deg):
"""
Returns a map shifted by a specified amount to, for example, correct
for a bad map location. These values are applied directly to the
`~sunpy.map.GenericMap.reference_coordinate`. To check how much shift
has already been applied see `~sunpy.map.GenericMap.shifted_value`
Parameters
----------
axis1 : `~astropy.units.Quantity`
The shift to apply to the Longitude (solar-x) coordinate.
axis2 : `~astropy.units.Quantity`
The shift to apply to the Latitude (solar-y) coordinate
Returns
-------
out : `~sunpy.map.GenericMap` or subclass
A new shifted Map.
"""
new_meta = self.meta.copy()
# Update crvals
new_meta['crval1'] = ((self.meta['crval1'] *
self.spatial_units[0] + axis1).to(self.spatial_units[0])).value
new_meta['crval2'] = ((self.meta['crval2'] *
self.spatial_units[1] + axis2).to(self.spatial_units[1])).value
# Create new map with the modification
new_map = self._new_instance(self.data, new_meta, self.plot_settings)
new_map._shift = SpatialPair(self.shifted_value[0] + axis1,
self.shifted_value[1] + axis2)
return new_map
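# Illustrative usage (added comment, not in the original source), assuming
# `smap` is any GenericMap instance:
#
#     shifted = smap.shift(10 * u.arcsec, -5 * u.arcsec)
#     shifted.shifted_value                  # (10 arcsec, -5 arcsec) accumulated shift
#     restored = shifted.shift(-10 * u.arcsec, 5 * u.arcsec)
#
# Only the crval1/crval2 metadata change; the data array is left untouched.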
@property
def rsun_meters(self):
"""Radius of the sun in meters."""
return u.Quantity(self.meta.get('rsun_ref', constants.radius), 'meter')
@property
def rsun_obs(self):
"""
Angular radius of the Sun.
Notes
-----
This value is taken from the ``'rsun_obs'``, ``'solar_r'``, or ``radius``
FITS keywords. If none of these keys are present the photospheric limb
as seen from the observer coordinate is returned.
"""
rsun_arcseconds = self.meta.get('rsun_obs',
self.meta.get('solar_r',
self.meta.get('radius',
None)))
if rsun_arcseconds is None:
warnings.warn("Missing metadata for solar angular radius: assuming photospheric limb "
"as seen from observer coordinate.",
SunpyUserWarning)
dsun = self.dsun
rsun = sun._angular_radius(constants.radius, dsun)
else:
rsun = rsun_arcseconds * u.arcsec
return rsun
@property
def coordinate_system(self):
"""Coordinate system used for x and y axes (ctype1/2)."""
return SpatialPair(self.meta.get('ctype1', 'HPLN- '),
self.meta.get('ctype2', 'HPLT- '))
@property
def _supported_observer_coordinates(self):
"""
A list of supported coordinate systems.
This is a list so it can easily maintain a strict order. The list of
two element tuples, the first item in the tuple is the keys that need
to be in the header to use this coordinate system and the second is the
kwargs to SkyCoord.
"""
return [(('hgln_obs', 'hglt_obs', 'dsun_obs'), {'lon': self.meta.get('hgln_obs'),
'lat': self.meta.get('hglt_obs'),
'radius': self.meta.get('dsun_obs'),
'unit': (u.deg, u.deg, u.m),
'frame': "heliographic_stonyhurst"}),
(('crln_obs', 'crlt_obs', 'dsun_obs'), {'lon': self.meta.get('crln_obs'),
'lat': self.meta.get('crlt_obs'),
'radius': self.meta.get('dsun_obs'),
'unit': (u.deg, u.deg, u.m),
'frame': "heliographic_carrington"}), ]
def _remove_existing_observer_location(self):
"""
Remove all keys that this map might use for observer location.
"""
all_keys = expand_list([e[0] for e in self._supported_observer_coordinates])
for key in all_keys:
self.meta.pop(key)
@property
def observer_coordinate(self):
"""
The Heliographic Stonyhurst Coordinate of the observer.
"""
missing_meta = {}
for keys, kwargs in self._supported_observer_coordinates:
meta_list = [k in self.meta for k in keys]
if all(meta_list):
sc = SkyCoord(obstime=self.date, **kwargs)
# We need to specially handle an observer location provided in Carrington
# coordinates. To create the observer coordinate, we need to specify the
# frame, but defining a Carrington frame normally requires specifying the
# frame's observer. This loop is the problem. Instead, since the
# Carrington frame needs only the Sun-observer distance component from the
# frame's observer, we create the same frame using a fake observer that has
# the same Sun-observer distance.
if isinstance(sc.frame, HeliographicCarrington):
fake_observer = HeliographicStonyhurst(0*u.deg, 0*u.deg, sc.radius,
obstime=sc.obstime)
fake_frame = sc.frame.replicate(observer=fake_observer)
hgs = fake_frame.transform_to(HeliographicStonyhurst(obstime=sc.obstime))
# HeliographicStonyhurst doesn't need an observer, but adding the observer
# facilitates a conversion back to HeliographicCarrington
return SkyCoord(hgs, observer=hgs)
return sc.heliographic_stonyhurst
elif any(meta_list) and not set(keys).isdisjoint(self.meta.keys()):
if not isinstance(kwargs['frame'], str):
kwargs['frame'] = kwargs['frame'].name
missing_meta[kwargs['frame']] = set(keys).difference(self.meta.keys())
warning_message = "".join(
[f"For frame '{frame}' the following metadata is missing: {','.join(keys)}\n" for frame, keys in missing_meta.items()])
warning_message = "Missing metadata for observer: assuming Earth-based observer.\n" + warning_message
warnings.warn(warning_message, SunpyMetadataWarning, stacklevel=3)
return get_earth(self.date)
@property
def heliographic_latitude(self):
"""Observer heliographic latitude."""
return self.observer_coordinate.lat
@property
def heliographic_longitude(self):
"""Observer heliographic longitude."""
return self.observer_coordinate.lon
@property
def carrington_latitude(self):
"""Observer Carrington latitude."""
hgc_frame = HeliographicCarrington(observer=self.observer_coordinate, obstime=self.date)
return self.observer_coordinate.transform_to(hgc_frame).lat
@property
def carrington_longitude(self):
"""Observer Carrington longitude."""
hgc_frame = HeliographicCarrington(observer=self.observer_coordinate, obstime=self.date)
return self.observer_coordinate.transform_to(hgc_frame).lon
@property
def dsun(self):
"""Observer distance from the center of the Sun."""
return self.observer_coordinate.radius.to('m')
@property
def _reference_longitude(self):
"""
FITS-WCS compatible longitude. Used in self.wcs and
self.reference_coordinate.
"""
return self.meta.get('crval1', 0.) * self.spatial_units[0]
@property
def _reference_latitude(self):
return self.meta.get('crval2', 0.) * self.spatial_units[1]
@property
def reference_coordinate(self):
"""Reference point WCS axes in data units (i.e. crval1, crval2). This value
includes a shift if one is set."""
return SkyCoord(self._reference_longitude,
self._reference_latitude,
frame=self.coordinate_frame)
@property
def reference_pixel(self):
"""
Pixel of reference coordinate.
The pixel returned uses zero-based indexing, so will be 1 pixel less
than the FITS CRPIX values.
"""
return PixelPair((self.meta.get('crpix1',
(self.meta.get('naxis1') + 1) / 2.) - 1) * u.pixel,
(self.meta.get('crpix2',
(self.meta.get('naxis2') + 1) / 2.) - 1) * u.pixel)
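# Added clarification: FITS CRPIX values are one-based, so a header with
# crpix1 = 1 and crpix2 = 1 yields reference_pixel == (0 pix, 0 pix), i.e. the
# centre of the bottom-left ([0, 0]) pixel used by ``bottom_left_coord``.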
@property
def scale(self):
"""
Image scale along the x and y axes in units/pixel
(i.e. cdelt1, cdelt2).
"""
# TODO: Fix this if only CDi_j matrix is provided
return SpatialPair(self.meta.get('cdelt1', 1.) * self.spatial_units[0] / u.pixel,
self.meta.get('cdelt2', 1.) * self.spatial_units[1] / u.pixel)
@property
def spatial_units(self):
"""
Image coordinate units along the x and y axes (i.e. cunit1, cunit2).
"""
return SpatialPair(u.Unit(self.meta.get('cunit1')),
u.Unit(self.meta.get('cunit2')))
@property
def rotation_matrix(self):
"""
Matrix describing the rotation required to align solar North with
the top of the image.
"""
if 'PC1_1' in self.meta:
return np.array([[self.meta['PC1_1'], self.meta['PC1_2']],
[self.meta['PC2_1'], self.meta['PC2_2']]])
elif 'CD1_1' in self.meta:
cd = np.array([[self.meta['CD1_1'], self.meta['CD1_2']],
[self.meta['CD2_1'], self.meta['CD2_2']]])
cdelt = u.Quantity(self.scale).value
return cd / cdelt
else:
return self._rotation_matrix_from_crota()
def _rotation_matrix_from_crota(self):
"""
This method converts the deprecated CROTA FITS kwargs to the new
PC rotation matrix.
This method can be overridden if an instrument's header does not use this
conversion.
"""
lam = self.scale[0] / self.scale[1]
p = np.deg2rad(self.meta.get('CROTA2', 0))
return np.array([[np.cos(p), -1 * lam * np.sin(p)],
[1/lam * np.sin(p), np.cos(p)]])
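# Worked example (added): with equal pixel scales (lam == 1) and CROTA2 = 90,
# the matrix above reduces to a pure rotation,
#
#     [[cos 90, -sin 90],      [[0, -1],
#      [sin 90,  cos 90]]  ==   [1,  0]]
#
# while unequal scales skew the off-diagonal terms by lam and 1/lam.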
@property
def fits_header(self):
"""
A `~astropy.io.fits.Header` representation of the ``meta`` attribute.
"""
return sunpy.io.fits.header_to_fits(self.meta)
# #### Miscellaneous #### #
def _fix_date(self):
# Check commonly used but non-standard FITS keyword for observation
# time and correct the keyword if we can. Keep updating old one for
# backwards compatibility.
if is_time(self.meta.get('date_obs', None)):
self.meta['date-obs'] = self.meta['date_obs']
def _fix_naxis(self):
# If naxis is not specified, get it from the array shape
if 'naxis1' not in self.meta:
self.meta['naxis1'] = self.data.shape[1]
if 'naxis2' not in self.meta:
self.meta['naxis2'] = self.data.shape[0]
if 'naxis' not in self.meta:
self.meta['naxis'] = self.ndim
def _fix_bitpix(self):
# Bit-depth
#
# 8 Character or unsigned binary integer
# 16 16-bit twos-complement binary integer
# 32 32-bit twos-complement binary integer
# -32 IEEE single precision floating point
# -64 IEEE double precision floating point
#
if 'bitpix' not in self.meta:
float_fac = -1 if self.dtype.kind == "f" else 1
self.meta['bitpix'] = float_fac * 8 * self.dtype.itemsize
def _get_cmap_name(self):
"""Build the default color map name."""
cmap_string = (self.observatory + self.detector +
str(int(self.wavelength.to('angstrom').value)))
return cmap_string.lower()
def _validate_meta(self):
"""
Validates the meta-information associated with a Map.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.map.sources package.
Allows for default unit assignment for:
CUNIT1, CUNIT2, WAVEUNIT
"""
msg = ('Image coordinate units for axis {} not present in metadata.')
err_message = []
for i in [1, 2]:
if self.meta.get(f'cunit{i}') is None:
err_message.append(msg.format(i))
if err_message:
err_message.append(
f'See | |
when on a loop
self.size = 0 # Residue size (0:1) 0:ignore size, 1:Large residue
self.SpecialRes = {0:0} # Special characteristic of residue
self.n1 = 0
self.n2 = 0
self.ResVol = 162.9
self.SideChainVol = 162.9-54.1
class Pro(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'P')
# Proline
# **********
# NH-(CH2)3-CH-COOH
# |_________|
# Side chain bond to C alpha
# exceptional conformational rigidity
# usually solvent-exposed.
# lacks a hydrogen on the amide group, it cannot act as a hydrogen bond donor,
# only as a hydrogen bond acceptor.
#
# Molecular weight 115.13 Da
# Non polar
# Acidity - Neutral
# Hydrophobicity 0.711 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -1.6 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 6.30
# pKa( alpha-COOH) 1.95
# pKa( alpha-NH2) 10.64
# CAS # 147-85-3
# PubChem ID 614
#
self.Hydropathy = -1.6
self.ResWeight = 59
self.name3L = 'PRO'
self.Hydrophobic = 1 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 0
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.SpecialRes = {0:0,3:-3,4:-3,5:-3,6:-2} # special value scores
self.n1 = 0
self.n2 = 0
self.ResVol = 112.7
self.SideChainVol = 112.7-54.1
# ############ Non Polar Uncharged ###########
class Gly(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'G')
#
# NH2-CH2-COOH
#
# Molecular weight 75.07 Da
# Non polar
# Acidity - Neutral
# Hydrophobicity 0.501 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -0.4 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 6.06
# pKa( alpha-COOH) 2.35
# pKa( alpha-NH2) 9.78
# CAS # 56-40-6
# PubChem ID 750
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.Hydropathy = -0.4
self.ResWeight = 19
self.name3L = 'GLY'
self.SpecialRes = {0:0,3:-3,5:-3} # special value scores
self.ResVol = 60.1
self.SideChainVol = 60.1-54.1
# ############ Polar Uncharged ###########
class Ser(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'S')
# Serine
# ######
# HO-CH2-CH(NH2)-COOH
#
# Molecular weight 105.09 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.359 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -0.8 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.68
# pKa( alpha-COOH) 2.19
# pKa( alpha-NH2) 9.21
# CAS # 56-45-1
# PubChem ID 617
#
self.Hydropathy = -0.8
self.ResWeight = 49
self.name3L = 'SER'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.SpecialRes = {0:0} # Special characteristic of residue
self.n1 = 0
self.n2 = 0
self.ResVol = 89.0
self.SideChainVol = 89-54.1
class Thr(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'T')
# Threonine
# ##########
# CH3-CH(OH)-CH(NH2)-COOH
# bearing an alcohol group
#
# Essential AA
# Molecular weight 119.12 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.450 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -0.7 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.60
# pKa( alpha-COOH) 2.09
# pKa( alpha-NH2) 9.10
# CAS # 72-19-5
# PubChem ID 6288
#
self.Hydropathy = -0.7
self.ResWeight = 63
self.name3L = 'THR'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.SpecialRes = {0:0} # Special characteristic of residue
self.n1 = 0
self.n2 = 0
self.ResVol = 116.1
self.SideChainVol = 116.1-54.1
class Cys(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'C')
# Cysteine
# ######
# HS-CH2-CH(NH2)-COOH
# thiol (R-S-H) side chain
# Has Sulfur in side chain
#
# Molecular weight 121.16 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.680 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index 2.5 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.05
# pKa( alpha-COOH) 1.92
# pKa( alpha-NH2) 10.70
# CAS # 59-90-4
# PubChem ID 5862
#
self.Hydropathy = 2.5
self.ResWeight = 65
self.name3L = 'CYS'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.n1 = -7
self.n2 = 0
self.SpecialRes = {0:0} # special value scores
self.ResVol = 108.5
self.SideChainVol = 108.5-54.1
class Tyr(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'Y')
# Tyrosine
# ###########
# HO-p-Ph-CH2-CH(NH2)-COOH
#
# Molecular weight 181.19 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.880 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -1.3 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.64
# pKa( alpha-COOH) 2.20
# pKa( alpha-NH2) 9.21
# CAS # 60-18-4
# PubChem ID 1153
#
self.Hydropathy = -1.3
self.ResWeight = 125
self.name3L = 'TYR'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.SpecialRes = {0:0} # Special characteristic of residue
self.n1 = 0
self.n2 = 0
self.ResVol = 193.6
self.SideChainVol = 193.6-54.1
class Asn(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'N')
# Asparagine
# ##########
# H2N-CO-CH2-CH(NH2)-COOH
# N Donor - NH2
#
# Has a carboxamide as the side chain's functional group (R-CO-NH2).
# The side chain can form hydrogen bond interactions with the peptide backbone;
# often found near the beginning and the end of alpha-helices,
# and in turn motifs in beta sheets.
#
# Molecular weight 132.12 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.236 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -3.5 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.41
# pKa( alpha-COOH) 2.14
# pKa( alpha-NH2) 8.72
# CAS # 70-47-3
# PubChem ID 236
#
self.Hydropathy = -3.5
self.ResWeight = 76
self.name3L = 'ASN'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.SpecialRes = {0:0} # Special characteristic of residue
self.n1 = -1
self.n2 = -2
self.ResVol = 114.1
self.SideChainVol = 114.1-54.1
class Gln(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'Q')
# Glutamine
# #############
# H2N-CO-(CH2)2-CH(NH2)-COOH
# N Donor - NH2
#
# Molecular weight 146.14 Da
# Polar
# Acidity - Neutral
# Hydrophobicity 0.251 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -3.5 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.65
# pKa( alpha-COOH) 2.17
# pKa( alpha-NH2) 9.13
# CAS # 56-85-9
# PubChem ID 5950
#
self.Hydropathy = -3.5
self.ResWeight = 90
self.name3L = 'GLN'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 1
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit when on a loop
self.size = 0 # Residue size (0:1) 0: ignore size, 1: large residue
self.n1 = -1
self.n2 = -2
self.SpecialRes = {0:0} # special value scores
self.ResVol = 143.8
self.SideChainVol = 143.8-54.1
# ########## Polar Acidic ###########
class Asp(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'D')
# Aspartic acid
# ########
# HOOC-CH2-CH(NH2)-COOH
#
# Molecular weight 133.10 Da
# Polar
# Acidity - Acidic
# Hydrophobicity 0.028 (Analytical Biochemistry 193:11,72-82 Elsevier 1991)
# Hydropathy index -3.5 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 2.85
# pKa( alpha-COOH) 1.99
# pKa( alpha-NH2) 9.90
# CAS # 56-84-8
# PubChem ID 5960
#
self.Hydropathy = -3.5
self.ResWeight = 77
self.name3L = 'ASP'
self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 1
self.polar = 1
self.corner = 0 # Would | |
# -*- coding: utf-8 -*-
# encoding=utf8
import sys
if sys.version_info >= (3,0,0):
long = int
import elasticsearch_dsl
es_dsl_version = elasticsearch_dsl.__version__
from six import iteritems
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import *
from elasticsearch_dsl.connections import connections
import fileinput, logging, argparse, gc, codecs, json, math, hashlib, signal, os, traceback, time
from argparse import RawTextHelpFormatter
from datetime import datetime
from threading import Thread, Event
from debug_utils import log_rss_memory_usage
log = logging.getLogger(__name__)
logging.basicConfig(format="[ %(asctime)s %(levelname)s %(process)s ] " + "%(message)s", level=logging.INFO)
args = None
translate_cfg_property = None
es_version = None
geo = None
if es_dsl_version >= (6, 0, 0):
#no string object
#http://elasticsearch-dsl.readthedocs.io/en/latest/Changelog.html?highlight=String#id2
log.error('Please, use the versions provided in requirements.txt. Version >=6.0.0 of elasticsearch-dsl modules break backward compatibility.')
sys.exit()
def parse_args():
parser = argparse.ArgumentParser(description='This program indexes files to elasticsearch.\n', formatter_class=RawTextHelpFormatter)
parser.add_argument('-i', '--input', dest='input', required=False, default='-', help='Input file. Default: stdin.')
parser.add_argument('-c', '--cfg', dest='cfg', required=False, default=False, help='Configuration file.')
parser.add_argument('-s', '--separator', dest='separator', required=False, default=';', help='File Separator. Default: ;')
#override configuration stuff
parser.add_argument('-x', '--index', dest='index', required=False, default=None, help='Elasticsearch index. It overrides the cfg JSON file values. Default: the index specified in the JSON file.')
parser.add_argument('-t', '--type', dest='type', required=False, default=None, help='Elasticsearch document type. It overrides the cfg JSON file values. Default: the type specified in the JSON file.')
#elastic connection stuff
parser.add_argument('-n', '--node', dest='node', required=False, default='localhost', help='Elasticsearch node. Default: localhost')
parser.add_argument('-p', '--port', dest='port', required=False, default=9200, help='Elasticsearch port. Default: 9200')
parser.add_argument('-u', '--user', dest='user', required=False, default=None, help='Elasticsearch user if needed.')
parser.add_argument('-P', '--password', dest='password', required=False, default='', help='Elasticsearch password if needed.')
#extra stuff to consider when indexing
parser.add_argument('--skip_first_line', dest='skip_first_line', default=False, action='store_true', help='Skips first line.')
parser.add_argument('--dates_in_seconds', dest='dates_in_seconds', default=False, action='store_true', help='If true, assume dates are provided in seconds.')
parser.add_argument('--refresh', dest='refresh', default=False, action='store_true', help='Refresh the index when finished.')
parser.add_argument('--delete', dest='delete', default=False, action='store_true', help='Delete the index before process.')
parser.add_argument('--utf8', dest='utf8', default=False, action='store_true', help='Change the default encoding to utf8. In python2 performance is drastically affected. No effect in python3.')
parser.add_argument('-X', '--extra_data', dest='extra_data', required=False, default=None, help='Pairs field:value with value being a keyword string that will be indexed with each document. Multiple pairs allowed with \';;;\' as separator. For example: --extra_data \'service:mail;;;host:mailserver\'')
parser.add_argument('--typed_iterator', dest='typed_iterator', default=False, action='store_true', help='If true, use a typed iterator that checks the value types and parses them. Reduces performance.')
#meta stuff to consider when creating indices
parser.add_argument('--replicas', dest='replicas', default=0, help='Number of replicas for the index if it does not exist. Default: 0')
parser.add_argument('--shards', dest='shards', default=2, help='Number of shards for the index if it does not exist. Default: 2')
parser.add_argument('--refresh_interval', dest='refresh_interval', default='60s', help='Refresh interval for the index if it does not exist. Default: 60s')
parser.add_argument('--no_source', dest='no_source', default=False, action='store_true', help='If true, do not index _source field.')
parser.add_argument('--no_all', dest='no_all', default=False, action='store_true', help='If true, do not index _all field.')
parser.add_argument('--deflate_compression', dest='deflate_compression', default=False, action='store_true', help='Store compression level in Lucene indices. Elasticsearch default is usually LZ4. This option enables best_compression using DEFLATE compression. More information: https://www.elastic.co/blog/store-compression-in-lucene-and-elasticsearch')
#index sutff for elastic
parser.add_argument('--bulk', dest='bulk', required=False, default=2000, type=int, help='Elasticsearch bulk size parameter. Default: 2000')
parser.add_argument('--threads', dest='threads', required=False, default=5, type=int, help='Number of threads for the parallel bulk. Default: 5')
parser.add_argument('--queue', dest='queue', required=False, default=6, type=int, help='Size of the task queue between the main thread (producing chunks to send) and the processing threads. Default: 6')
parser.add_argument('--timeout', dest='timeout', required=False, type=int, default=600, help='Connection timeout in seconds. Default: 600')
#internal stuff for the elastic API
parser.add_argument('--debug', dest='debug', default=False, action='store_true', help='If true log level is set to DEBUG.')
parser.add_argument('--no_progress', dest='noprogress', default=False, action='store_true', help='If true do not show progress.')
parser.add_argument('--show_elastic_logger', dest='show_elastic_logger', default=False, action='store_true', help='If true show elastic logger at the same loglevel as the importer.')
parser.add_argument('--raise_on_error', dest='raise_on_error', default=False, action='store_true', help='Raise BulkIndexError containing errors (as .errors) from the execution of the last chunk when some occur. By default we DO NOT raise.')
parser.add_argument('--raise_on_exception', dest='raise_on_exception', default=False, action='store_true', help='By default we DO NOT propagate exceptions from call to bulk and just report the items that failed as failed. Use this option to propagate exceptions.')
parser.add_argument('--test_processing_speed', dest='test_processing_speed', default=False, action='store_true', help='For debugging purposes, only consumes the iterator lines without indexing.')
#stuff to avoid duplicates
parser.add_argument('--md5_id', dest='md5_id', default=False, action='store_true', help='Uses the MD5 hash of the line as ID.')
parser.add_argument('--md5_exclude', dest='md5_exclude', nargs = '*', required=False, default=[], help='List of column names to be excluded from the hash.')
#stuff to add geographical information from data fields
parser.add_argument('--geo_precission', dest='geo_precission', default=None, help='If set, geographical information will be added to the indexed documents. Possible values: country_level, multilevel, IP. If country_level is used in the geo_precission parameter, a column must be provided with either the country_code with 2 letters (ISO 3166-1 alpha-2) or the country_name in the format of the countries.csv file of the repository, for better results use country_code. If multilevel is set in the geo_precission option, then, a column or list of columns must be provided with either the country_code, region_name, place_name, or zip_code. If IP is set in the geo_precission option, then a column name containing IP addresses must be provided.')
parser.add_argument('--geo_column_country_code', dest='geo_column_country_code', default=None, help='Column name containing country codes with 2 letters (ISO 3166-1 alpha-2). Used if geo_precission is set to either country_level or multilevel.')
parser.add_argument('--geo_column_country_name', dest='geo_column_country_name', default=None, help='Column name containing country names. Used if geo_precission is set to either country_level.')
parser.add_argument('--geo_column_region_name', dest='geo_column_region_name', default=None, help='Column name containing region names. Used if geo_precission is set to multilevel.')
parser.add_argument('--geo_column_place_name', dest='geo_column_place_name', default=None, help='Column name containing place names. Used if geo_precission is set to multilevel.')
parser.add_argument('--geo_column_zip_code', dest='geo_column_zip_code', default=None, help='Column name containing zip codes. Used if geo_precission is set to multilevel.')
parser.add_argument('--geo_column_ip', dest='geo_column_ip', default=None, help='Column name containing IP addresses. Used if geo_precission is set to IP.')
parser.add_argument('--geo_int_ip', dest='geo_int_ip', default=False, help='Set if the provided IP addresses are integer numbers.')
#geo databases stuff
parser.add_argument('--regenerate_databases', dest='regenerate_databases', nargs = '*', required=False, default=[], help='Regenerate geo databases and exit. Specify the databases to regenerate: db9, db0, multilevel.')
#stuff for TOR information
parser.add_argument('--tor-info-from', dest='tor_info_from', default=False, help='Column name containing IP addresses. Information will be added about the relation (if any) of the IP to the TOR network.')
parser.add_argument('--tor-int-ip', dest='tor_int_ip', default=False, help='Set if the provided IP addresses are integer numbers.')
args = parser.parse_args()
#set up loggers
if not args.show_elastic_logger:
for _ in ("elasticsearch", "urllib3"):
logging.getLogger(_).setLevel(logging.CRITICAL)
loggers = [log, logging.getLogger('geodb')]
loglevel = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format="[ %(asctime)s %(levelname)s %(threadName)s ] " + "%(message)s", level=loglevel)
#logging.basicConfig(format='%(asctime)s %(message)s', level=loglevel)
for logger in loggers:
logger.setLevel(loglevel)
if len(args.regenerate_databases) == 0 and not args.cfg:
parser.error("-c or --cfg required.")
elif len(args.regenerate_databases) > 0:
path = get_script_path()
import geodb
if 'db0' in args.regenerate_databases:
fname = '{}/db/geodb0.db'.format(path)
if os.path.isfile(fname):
os.remove(fname)
geodb.CountryLevel_GeoDB('db0', '{}/db/countries.csv'.format(path), fname, update=True, debug=args.debug)
if 'multilevel' in args.regenerate_databases:
fname = '{}/db/multilevel.db'.format(path)
if os.path.isfile(fname):
os.remove(fname)
log.info('FTS5 Support: {}'.format(geodb.ZIPLevel_GeoDB.check_FTS5_support()))
geodb.ZIPLevel_GeoDB('geoinfo', '{}/db/create_zip_db.sql.gz'.format(path), '{}/db/multilevel.db'.format(path), update=True, debug=args.debug)
#geodb.ZIPLevel_GeoDB('{}/db/multilevel.db'.format(path), '{}/db/create_zip_db.sql.gz'.format(path), update=True)
if 'db9' in args.regenerate_databases:
fname = '{}/db/geodb9.db'.format(path)
if os.path.isfile(fname):
os.remove(fname)
fname = '{}/db/geodb9.db'.format(path)
geodb.ZIP_GeoIPDB('db9', '{}/db/IP2LOCATION-LITE-DB9.CSV.gz'.format(path), fname, update=True, debug=args.debug)
sys.exit(1)
if args.extra_data is not None:
pairs = args.extra_data.split(';;;')
extra_data_dicc = {}
for pair in pairs:
fieldname, fieldvalue = pair.split(':')
extra_data_dicc[fieldname] = fieldvalue
args.extra_data = extra_data_dicc
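# Illustrative sketch (not part of the original source): what the block above
# produces for the --extra_data example from the help text (values are hypothetical).
#
#   extra = 'service:mail;;;host:mailserver'
#   parsed = dict(pair.split(':') for pair in extra.split(';;;'))
#   # parsed == {'service': 'mail', 'host': 'mailserver'}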
args.geo_precission = args.geo_precission.lower() if args.geo_precission is not None else args.geo_precission
if args.geo_precission not in ['ip', 'multilevel', 'country_level', None]:
log.error("Please, provide a valid --geo_precission option {'ip', 'multilevel', 'country_level'}.")
sys.exit(-1)
args.geo_fields = {}
if args.geo_precission == 'ip':
if args.geo_column_ip is not None:
args.geo_fields['ip'] = args.geo_column_ip
elif args.geo_precission == 'multilevel':
if args.geo_column_country_code is not None:
args.geo_fields['country_code'] = args.geo_column_country_code
if args.geo_column_country_name is not None:
args.geo_fields['country_name'] = args.geo_column_country_name
if args.geo_column_region_name is not None:
args.geo_fields['region_name'] = args.geo_column_region_name
if args.geo_column_place_name is not None:
args.geo_fields['place_name'] = args.geo_column_place_name
if args.geo_column_zip_code is not None:
args.geo_fields['zip_code'] = args.geo_column_zip_code
elif args.geo_precission == 'country_level':
if args.geo_column_country_code is not None:
args.geo_fields['country_code'] = args.geo_column_country_code
if args.geo_column_country_name is not None:
args.geo_fields['country_name'] = args.geo_column_country_name
if args.geo_precission is not None and len(args.geo_fields) == 0:
log.error('Please provide the --geo_column options')
sys.exit(-1)
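# Illustrative note (not part of the original source): with a hypothetical invocation
# such as --geo_precission multilevel --geo_column_country_code cc --geo_column_zip_code zip,
# the block above yields args.geo_fields == {'country_code': 'cc', 'zip_code': 'zip'},
# which the geo lookup presumably consumes later on.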
args.geodb = load_geo_database(args.geo_precission, args.debug)
if args.geodb is not None:
log_rss_memory_usage('After loading geo module.')
log.info('Geo-module loaded.')
args.tor_info = None
if args.tor_info_from != False:
log.info('Loading TORinfo module.')
from torinfo import TORinfo
path = get_script_path()
args.tor_info = TORinfo('{}/db/Tor_ip_list_EXIT.csv'.format(path), '{}/db/Tor_ip_list_ALL.csv'.format(path))
args.date_fields = []
return args
#SYS STUFF
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
#TOR STUFF
def get_torinfo_field():
extra_tor_fields = {}
extra_tor_fields['tor_info'] = translate_cfg_property('keyword')
extra_tor_fields['tor_is_exit_node'] = translate_cfg_property('boolean')
extra_tor_fields['tor_is_tor_server'] = translate_cfg_property('boolean')
return extra_tor_fields
#GEO LOCATION STUFF
def get_geodata_field(level):
"""Creates a geodata field for the DocType considering the following format.
Format for Country Level geolocalization:
{'country_code': 'US',
'country_name': 'UNITED STATES',
'location': '37.09024,-95.712891',
'representative_point': '37.09024,-95.712891'}
Format for Multilevel geolocalization:
{'accuracy': 4.0,
'admin_code1': 'DC',
'admin_code2': '001',
'admin_code3': '',
'admin_name1': 'DISTRICT OF COLUMBIA',
'admin_name2': 'DISTRICT OF COLUMBIA',
'admin_name3': '',
'country_code': 'US',
'country_name': None, // always None in Multilevel geolocalization
'location': '38.9122,-77.0177',
'place_name': 'WASHINGTON',
'representative_point': '38.8959,-77.0211',
'zip_code': '20001'}
Format for IP Level geolocalization:
{'country_code': 'US',
'country_name': 'UNITED STATES',
'location': '37.44188,-122.14302',
'place_name': 'PALO ALTO',
'region_name': 'CALIFORNIA',
'representative_point': '37.57259,-92.932405',
'zip_code': '94301'}
The three formats match in location, representative_point, country_code
:return: A geodata field.
"""
#
extra_geo_fields = {}
if level == 'country_level':
pass
if level == 'multilevel':
extra_geo_fields['geo_accuracy'] = translate_cfg_property('float')
extra_geo_fields['geo_admin_code2'] = translate_cfg_property('keyword')
extra_geo_fields['geo_admin_code3'] = translate_cfg_property('keyword')
extra_geo_fields['geo_admin_name1'] = translate_cfg_property('keyword')
extra_geo_fields['geo_admin_name2'] = translate_cfg_property('keyword')
extra_geo_fields['geo_admin_name3'] = translate_cfg_property('keyword')
extra_geo_fields['geo_place_name'] = translate_cfg_property('keyword')
extra_geo_fields['geo_zip_code'] = translate_cfg_property('keyword')
if level == 'ip':
extra_geo_fields['geo_place_name'] = translate_cfg_property('keyword')
extra_geo_fields['geo_region_name'] = translate_cfg_property('keyword')
extra_geo_fields['geo_zip_code'] = translate_cfg_property('keyword')
extra_geo_fields['geo_country_code'] = translate_cfg_property('keyword')
extra_geo_fields['geo_country_name'] = translate_cfg_property('keyword')
extra_geo_fields['geo_location'] = translate_cfg_property('geopoint')
extra_geo_fields['geo_representative_point'] = translate_cfg_property('geopoint')
return extra_geo_fields
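# Illustrative sketch (not part of the original source): how a looked-up geodata
# dict (formats documented above) could be flattened onto the geo_* document fields
# declared by get_geodata_field(). The mapping below is an assumption based on the
# field names, not code taken from this project.
#
#   geodata = {'country_code': 'US', 'place_name': 'PALO ALTO',
#              'location': '37.44188,-122.14302'}
#   doc_fields = {'geo_' + k: v for k, v in geodata.items()}
#   # doc_fields == {'geo_country_code': 'US', 'geo_place_name': 'PALO ALTO',
#   #                'geo_location': '37.44188,-122.14302'}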
def load_geo_database(level, debug):
if level == 'country_level':
path = get_script_path()
log_rss_memory_usage('Before loading geo module.')
from geodb import CountryLevel_GeoDB
return CountryLevel_GeoDB('db0', '{}/db/countries.csv'.format(path), '{}/db/geodb0.db'.format(path), update=False, debug=debug)
elif level == 'multilevel':
path = get_script_path()
log_rss_memory_usage('Before loading geo module.')
from geodb import ZIPLevel_GeoDB
log.info('FTS5 Support: {}'.format(ZIPLevel_GeoDB.check_FTS5_support()))
return ZIPLevel_GeoDB('{}/db/multilevel.db'.format(path), '{}/db/create_zip_db.sql.gz'.format(path), update=False, | |
<reponame>team-aisaac/aisaac-strategy
#!/usr/bin/env python
# coding:utf-8
import math
import rospy
import numpy as np
from world.objects import Objects
from aisaac.msg import Ball_sub_params, Def_pos
from statistics import variance
import config
from common import functions
WORLD_LOOP_RATE = config.WORLD_LOOP_RATE
"""
主に共通した計算処理などを担当する
"""
# Publisher class
class Publisher():
def __init__(self):
self.team_color = str(rospy.get_param("friend_color"))
self.ball_sub_params_pub = rospy.Publisher("/" + self.team_color + "/ball_sub_params", Ball_sub_params, queue_size=10)
self.def_pos_pub = rospy.Publisher("/" + self.team_color + "/def_pos", Def_pos, queue_size=10)
def ball_params_publisher(self, msg):
self.ball_sub_params_pub.publish(msg)
def def_pos_publisher(self, msg):
self.def_pos_pub.publish(msg)
class Calculation():
def __init__(self):
rospy.init_node("Calculation_node")
self.robot_color = str(rospy.get_param("friend_color"))
self.robot_side = str(rospy.get_param("team_side"))
# Composition
self.objects = Objects(
self.robot_color, self.robot_side, config.NUM_FRIEND_ROBOT, config.NUM_ENEMY_ROBOT, node="calculation")
self.robot_friend = self.objects.robot
self.robot_enemy = self.objects.enemy
self.ball_params = self.objects.ball
self.ball_sub_params = Ball_sub_params()
self.def_pos = Def_pos()
self.ball_frame = 10 # number of frames used for the ball trajectory line fit and velocity calculation
self.ball_move_threshold = 0.01 # threshold for judging that the ball has moved [m]
self.same_pos_count = 0 # counter used for stop detection
self.ball_pos_count = 0 # calculation counter, counted in frames
self.calc_flag = False # calculation flag; do not calculate while the ball is judged to be stopped
self.ball_pos_x_array = np.array([0.0]*self.ball_frame) # array storing the ball x coordinates
self.ball_pos_y_array = np.array([0.0]*self.ball_frame) # array storing the ball y coordinates
self.ball_vel_array = np.array([0.0]*self.ball_frame) # array storing the ball speed
self.ball_vel_x_array = np.array([0.0]*self.ball_frame) # array storing the ball velocity in the x direction
self.ball_vel_y_array = np.array([0.0]*self.ball_frame) # array storing the ball velocity in the y direction
self.ball_vel_time_array = np.array([0.0]*self.ball_frame) # time array used for acceleration calculation
self.ball_vel = 0. # ball speed
self.ball_vel_a = 0. # slope of the ball speed
self.ball_vel_b = 0. # intercept of the ball speed
self.ball_vel_x_a = 0. # slope of the x-direction velocity
self.ball_vel_x_b = 0. # intercept of the x-direction velocity
self.ball_vel_y_a = 0. # slope of the y-direction velocity
self.ball_vel_y_b = 0. # intercept of the y-direction velocity
self.ball_stop_time_x = 0. # time until the ball stops in the x direction
self.ball_stop_time_y = 0. # time until the ball stops in the y direction
# variables for computing the robot positions when defending
# currently only the blue team is supported
self.g_up_x = -6.0 # x coordinate of the upper goal post: y_GL
self.g_up_y = 0.6 # y coordinate of the upper goal post: x_GL
self.g_down_x = -6.0 # x coordinate of the lower goal post: y_GR
self.g_down_y = -0.6 # y coordinate of the lower goal post: x_GR
self.g_center_x = -6.0 # x coordinate of the goal center: y_GC
self.g_center_y = 0.0 # y coordinate of the goal center: x_GC
self.p_area_up_x = -4.8 # x coordinate of the upper corner of the penalty area: y_PL
self.p_area_up_y = 1.2 # y coordinate of the upper corner of the penalty area: x_PL
self.p_area_down_x = -4.8 # x coordinate of the lower corner of the penalty area: y_PR
self.p_area_down_y = -1.2 # y coordinate of the lower corner of the penalty area: x_PR
self.line_up_x = 0.0 # x coordinate of the upper intersection of the ball-goalpost line with the defense line: y_LL
self.line_up_y = 0.0 # y coordinate of the upper intersection of the ball-goalpost line with the defense line: x_LL
self.line_down_x = 0.0 # x coordinate of the lower intersection of the ball-goalpost line with the defense line: y_LR
self.line_down_y = 0.0 # y coordinate of the lower intersection of the ball-goalpost line with the defense line: x_LR
self.line_up_r_x = 0.0 # coordinate corrected for the robot radius: y_LL'
self.line_up_r_y = 0.0 # coordinate corrected for the robot radius: x_LL'
self.line_down_r_x = 0.0 # coordinate corrected for the robot radius: y_LR'
self.line_down_r_y = 0.0 # coordinate corrected for the robot radius: x_LR'
self.offset_r = 0. # offset value
self.robot_r = 90.0/1000.0 # robot radius
self.robot_a = 1.0 # robot acceleration
self.ball_MAX_SPEED = 6.5 # maximum ball speed
self.delay_time_ms = 100.0 # delay time [ms]
self.L_a = 0.0 # distance from the wall line to the ball
self.L_G = 0.0 # distance the robot can move before the ball arrives
# Given x and y arrays and the number of data points n, perform a least-squares fit and return the slope and intercept
def reg1dim(self, x, y, n):
# clip the data
x = np.clip(x,-6.5,6.5)
y = np.clip(y,-5.5,5.5)
# compute the slope and intercept
a = np.clip(((np.dot(x, y) - y.sum()*x.sum()/n) / ((x**2.).sum() - x.sum()**2./n)),-1.0e+3,1.0e+3)
b = np.clip((y.sum() - a * x.sum())/n,-1.0e+3,1.0e+3)
return a, b
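# Illustrative sketch (not part of the original source): reg1dim is the closed form
# of simple linear regression, a = (sum(x*y) - sum(x)*sum(y)/n) / (sum(x^2) - sum(x)^2/n)
# and b = (sum(y) - a*sum(x)) / n, so on clean data it agrees with numpy's polyfit.
#
#   import numpy as np
#   x = np.array([0.0, 1.0, 2.0, 3.0])
#   y = 2.0 * x + 1.0
#   n = len(x)
#   a = (np.dot(x, y) - y.sum() * x.sum() / n) / ((x ** 2).sum() - x.sum() ** 2 / n)
#   b = (y.sum() - a * x.sum()) / n
#   # a == 2.0, b == 1.0, matching np.polyfit(x, y, 1)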
# Compute the slope and intercept from the last n frames of ball positions using least squares.
# Reset if the variance is greater than 1 or the ball has stopped.
def calc_ball_line(self):
# get the coordinates of the most recent n frames
if self.ball_pos_count < self.ball_frame:
self.ball_pos_x_array[self.ball_pos_count] = self.ball_params.get_current_position()[0]
self.ball_pos_y_array[self.ball_pos_count] = self.ball_params.get_current_position()[1]
# self.ball_vel_x_array[self.ball_pos_count] = self.ball_params.get_current_velosity()[0]
# self.ball_vel_y_array[self.ball_pos_count] = self.ball_params.get_current_velosity()[1]
# self.ball_vel_array[self.ball_pos_count] = math.sqrt(self.ball_params.get_current_velosity()[0]**2 + self.ball_params.get_current_velosity()[1]**2)
# self.ball_vel_time_array[self.ball_pos_count] = 1./WORLD_LOOP_RATE * self.ball_pos_count
# From the second frame on, compute the deviation from the previous position; if it is below the threshold, judge the ball as not moving and increment the counter. Once the counter reaches half of the n frames, set the calculation flag to False.
if self.ball_pos_count > 0:
if functions.distance_btw_two_points(
(self.ball_pos_x_array[self.ball_pos_count-1],self.ball_pos_y_array[self.ball_pos_count-1]),
(self.ball_pos_x_array[self.ball_pos_count],self.ball_pos_y_array[self.ball_pos_count])) < self.ball_move_threshold:
self.same_pos_count+=1
if self.same_pos_count >= self.ball_frame/2:
self.same_pos_count = self.ball_frame/2
self.ball_pos_count = -1
self.calc_flag = False
else:
self.same_pos_count = 0
self.calc_flag = True
self.ball_pos_count+=1
else:
self.ball_pos_x_array = np.roll(self.ball_pos_x_array,-1)
self.ball_pos_y_array = np.roll(self.ball_pos_y_array,-1)
# self.ball_vel_x_array = np.roll(self.ball_vel_x_array,-1)
# self.ball_vel_y_array = np.roll(self.ball_vel_y_array,-1)
# self.ball_vel_array = np.roll(self.ball_vel_array,-1)
self.ball_pos_x_array[self.ball_pos_count-1] = self.ball_params.get_current_position()[0]
self.ball_pos_y_array[self.ball_pos_count-1] = self.ball_params.get_current_position()[1]
# self.ball_vel_x_array[self.ball_pos_count-1] = self.ball_params.get_current_velosity()[0]
# self.ball_vel_y_array[self.ball_pos_count-1] = self.ball_params.get_current_velosity()[1]
# self.ball_vel_array[self.ball_pos_count] = math.sqrt(self.ball_params.get_current_velosity()[0]**2 + self.ball_params.get_current_velosity()[1]**2)
if functions.distance_btw_two_points(
(self.ball_pos_x_array[self.ball_pos_count-2],self.ball_pos_y_array[self.ball_pos_count-2]),
(self.ball_pos_x_array[self.ball_pos_count-1],self.ball_pos_y_array[self.ball_pos_count-1])) < self.ball_move_threshold:
self.same_pos_count+=1
if self.same_pos_count >= self.ball_frame/2:
self.ball_pos_count = 0
self.calc_flag = False
else:
self.same_pos_count = 0
self.calc_flag = True
# compute the variance of the x and y coordinates
x_variance = variance(self.ball_pos_x_array)
y_variance = variance(self.ball_pos_y_array)
#print(x_variance,y_variance)
# reset the counters if the variance is greater than 1
if (x_variance > 1 or y_variance > 1):
self.ball_pos_count = 0
self.same_pos_count = 0
for i in range(0,self.ball_frame):
self.ball_pos_x_array[i] = 0
self.ball_pos_y_array[i] = 0
#print(self.ball_pos_count,self.same_pos_count)
if self.calc_flag == True:
a, b = self.reg1dim(self.ball_pos_x_array, self.ball_pos_y_array, self.ball_pos_count)
self.ball_params.set_line_a(a)
self.ball_params.set_line_b(b)
""" #self.ball_vel_x_a, self.ball_vel_x_b = self.reg1dim(self.ball_vel_x_array, self.ball_vel_time_array, self.ball_pos_count)
#self.ball_vel_y_a, self.ball_vel_y_b = self.reg1dim(self.ball_vel_y_array, self.ball_vel_time_array, self.ball_pos_count)
#self.ball_vel_a, self.ball_vel_b = self.reg1dim(self.ball_vel_array, self.ball_vel_time_array, self.ball_pos_count)
#self.ball_params.ball_sub_params.a, self.ball_params.ball_sub_params.b = self.reg1dim(self.ball_vel_x_array, self.ball_vel_time_array, self.ball_pos_count)
# self.ball_params.ball_sub_params.future_x =
# self.ball_params.ball_sub_params.future_y
#rospy.loginfo("vel_x_a:%f\tvel_x_b:%f",self.ball_vel_x_a, self.ball_vel_x_b)
# compute the predicted stopping position of the ball
# compute the current x/y velocities from the least-squares fitted line -> the formula is wrong, velocity estimation is needed
#ball_fit_vel_x = self.ball_vel_x_a*self.ball_vel_time_array[self.ball_pos_count-1] + self.ball_vel_x_b
#ball_fit_vel_y = self.ball_vel_y_a*self.ball_vel_time_array[self.ball_pos_count-1] + self.ball_vel_y_b
# for now, use the current velocity
#ball_fit_vel_x = self.ball_params.get_current_velosity()[0]
#ball_fit_vel_y = self.ball_params.get_current_velosity()[1]
# compute the time until stopping from the current velocity and the slope
if self.ball_vel_x_a != 0 and self.ball_vel_y_a != 0:
self.ball_stop_time_x = -(ball_fit_vel_x / self.ball_vel_x_a)
self.ball_stop_time_y = -(ball_fit_vel_y / self.ball_vel_y_a)
if self.ball_stop_time_x <= 0 or self.ball_stop_time_y <= 0:
# self.ball_params.ball_sub_params.future_x = 0
# self.ball_params.ball_sub_params.future_y = 0
else:
self.ball_params.ball_sub_params.future_x = self.ball_params.get_current_position()[0] + ball_fit_vel_x*self.ball_stop_time_x + 1/2*self.ball_vel_x_a*self.ball_stop_time_x**2
self.ball_params.ball_sub_params.future_y = self.ball_params.get_current_position()[1] + ball_fit_vel_y*self.ball_stop_time_y + 1/2*self.ball_vel_y_a*self.ball_stop_time_y**2
self.ball_params.ball_sub_params.future_x = np.clip(self.ball_params.ball_sub_params.future_x,-5,5)
self.ball_params.ball_sub_params.future_y = np.clip(self.ball_params.ball_sub_params.future_y,-5,5)
#rospy.loginfo("t=(%.3f,%.3f)\t(f_x:n_x)=(%.3f:%.3f)\t(f_y:n_y)=(%.3f:%.3f)",self.ball_stop_time_x,self.ball_stop_time_y,self.ball_params.ball_sub_params.future_x, self.ball_params.get_current_position()[0], self.ball_params.ball_sub_params.future_y, self.ball_params.get_current_position()[1]) """
else:
# self.ball_params.ball_sub_params.a = 0.
# self.ball_params.ball_sub_params.b = 0.
self.ball_params.set_line_a(0.)
self.ball_params.set_line_b(0.)
""" self.ball_vel_x_a = 0.
self.ball_vel_x_b = 0.
self.ball_vel_y_a = 0.
self.ball_vel_y_b = 0.
for i in range(0,self.ball_frame):
self.ball_pos_x_array[i] = 0
self.ball_pos_y_array[i] = 0
self.ball_vel_x_array[i] = 0
self.ball_vel_y_array[i] = 0 """
self.ball_sub_params.a = self.ball_params.get_line_a()
self.ball_sub_params.b = self.ball_params.get_line_b()
#print(self.ball_stop_time_x,self.ball_stop_time_y)
#rospy.loginfo("f=%d\tt=(%.2f,%.2f)\t(f_x:n_x)=(%.3f:%.3f)\t(f_y:n_y)=(%.3f:%.3f)",self.calc_flag,self.ball_stop_time_x,self.ball_stop_time_y,self.ball_params.ball_sub_params.future_x, self.ball_params.get_current_position()[0], self.ball_params.ball_sub_params.future_y, self.ball_params.get_current_position()[1])
def calc_def_pos(self):
# reassign the ball coordinates for readability
ball_x = self.ball_params.get_current_position()[0] # y_B
ball_y = self.ball_params.get_current_position()[1] # x_B
# wall (defender) coordinates
def1_pos_x = 0.0
def1_pos_y = 0.0
def2_pos_x = 0.0
def2_pos_y = 0.0
# compute each parameter
a_1 = ball_y - self.g_center_y
b_1 = ball_x - self.g_center_x
c_1 = self.line_down_y*(self.g_center_y - ball_y) + self.line_down_x*(self.g_center_x - ball_x)
a_2 = ball_y - self.g_center_y
b_2 = ball_x - self.g_center_x
c_2 = self.line_up_y*(self.g_center_y - ball_y) + self.line_up_x*(self.g_center_x - ball_x)
a_3 = self.g_center_y - ball_y
b_3 = self.g_center_x - ball_x
c_3 = self.p_area_down_y*(ball_y - self.g_center_y) + self.p_area_down_x*(ball_x - self.g_center_x)
a_4 = ball_x - self.g_up_x
b_4 = self.g_up_y - ball_y
c_4 = ball_y*(self.g_up_x - ball_x) + ball_x*(ball_y - self.g_up_y)
a_5 = ball_x - self.g_down_x
b_5 = self.g_down_y - ball_y
c_5 = ball_y*(self.g_down_x - ball_x) + ball_x*(ball_y - self.g_down_y)
a_6 = self.g_center_y - ball_y
b_6 = self.g_center_x - ball_x
c_6 = self.p_area_up_y*(ball_y - self.g_center_y) + self.p_area_up_x*(ball_x - self.g_center_x)
t = self.offset_r/math.sqrt((self.g_center_y - ball_y)**2 + (self.g_center_x - ball_x)**2)
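# Illustrative note (not part of the original source): t above rescales the
# ball-to-goal-centre vector so that (ball - g_center) * t has length offset_r;
# adding that term shifts the wall positions away from the goal along the
# ball-goal line. With hypothetical numbers, ball at (0, 0), goal centre at
# (-6, 0) and offset_r = 0.2, t = 0.2 / 6 and the shift is (+0.2, 0.0).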
# compute the defense line
# bottom-most region
if ball_x <= (self.g_down_x - self.p_area_down_x)/(self.g_down_y - self.p_area_down_y)*(ball_y - self.g_down_y) + self.g_down_x:
self.line_up_r_y = (b_3*c_4 - b_4*c_3)/(a_3*b_4 - a_4*b_3) + (ball_y - self.g_center_y)*t
self.line_up_r_x = (a_3*c_4 - a_4*c_3)/(a_4*b_3 - a_3*b_4) + (ball_x - self.g_center_x)*t
self.line_down_r_y = (b_3*c_5 - b_5*c_3)/(a_3*b_5 - a_5*b_3) + (ball_y - self.g_center_y)*t
self.line_down_r_x = (a_3*c_5 - a_5*c_3)/(a_5*b_3 - a_3*b_5) + (ball_x - self.g_center_x)*t
self.L_a = abs(a_3*ball_y + b_3*ball_x + c_3)/math.sqrt(a_3**2 + b_3**2)
# lower region
elif (ball_x >= (self.g_down_x - self.p_area_down_x)/(self.g_down_y - self.p_area_down_y)*(ball_y - self.g_down_y) + self.g_down_x) and (ball_y <= self.g_center_y):
self.line_down_r_y = (self.g_down_y - ball_y)/(self.g_down_x - ball_x)*(self.p_area_down_x - ball_x) + ball_y + (ball_y - self.g_center_y)*t
self.line_down_r_x = self.p_area_down_x + (ball_x - self.g_center_x)*t
self.line_down_y = (self.g_down_y - ball_y)/(self.g_down_x - ball_x)*(self.p_area_down_x - ball_x) + ball_y
self.line_down_x = self.p_area_down_x
c_1 = self.line_down_y*(self.g_center_y - ball_y) + self.line_down_x*(self.g_center_x - ball_x)
self.line_up_r_y = (b_1*c_4 - b_4*c_1)/(a_1*b_4 - a_4*b_1) + (ball_y - self.g_center_y)*t
self.line_up_r_x = (a_1*c_4 - a_4*c_1)/(a_4*b_1 - a_1*b_4) + (ball_x - self.g_center_x)*t
self.L_a = abs(a_1*ball_y + b_1*ball_x + c_1)/math.sqrt(a_1**2 + b_1**2)
# upper region
elif (ball_x >= (self.g_up_x - self.p_area_up_x)/(self.g_up_y - self.p_area_up_y)*(ball_y - self.g_up_y) + self.g_up_x) and (ball_y > self.g_center_y):
self.line_up_r_y = (self.g_up_y - ball_y)/(self.g_up_x - ball_x)*(self.p_area_up_x - ball_x) + ball_y + (ball_y - self.g_center_y)*t
self.line_up_r_x = self.p_area_up_x + (ball_x - self.g_center_x)*t
self.line_up_y = (self.g_up_y - ball_y)/(self.g_up_x - ball_x)*(self.p_area_up_x - ball_x) + ball_y
self.line_up_x = self.p_area_up_x
c_2 = self.line_up_y*(self.g_center_y - ball_y) + self.line_up_x*(self.g_center_x - ball_x)
self.line_down_r_y = (b_2*c_5 - b_5*c_2)/(a_2*b_5 - a_5*b_2) + (ball_y - self.g_center_y)*t
self.line_down_r_x = (a_2*c_5 - a_5*c_2)/(a_5*b_2 - a_2*b_5) + (ball_x - self.g_center_x)*t
self.L_a = abs(a_2*ball_y + b_2*ball_x + c_2)/math.sqrt(a_2**2 + b_2**2)
# top-most region
elif ball_x >= (self.g_up_x - self.p_area_up_x)/(self.g_up_y - self.p_area_up_x)*(ball_y - self.g_up_y) + self.g_up_x:
self.line_up_r_y = (b_6*c_4 - b_4*c_6)/(a_6*b_4 - a_4*b_6) + (ball_y - self.g_center_y)*t
self.line_up_r_x = (a_6*c_4 - a_4*c_6)/(a_4*b_6 - a_6*b_4) + (ball_x - self.g_center_x)*t
self.line_down_r_y = (b_6*c_5 - b_5*c_6)/(a_6*b_5 - a_5*b_6) + (ball_y - self.g_center_y)*t
self.line_down_r_x = (a_6*c_5 - a_5*c_6)/(a_5*b_6 - a_6*b_5) + (ball_x - self.g_center_x)*t
self.L_a = abs(a_6*ball_y + b_6*ball_x + c_6)/math.sqrt(a_6**2 + b_6**2)
# otherwise
else:
self.line_up_r_x = self.p_area_up_x + self.offset_r
self.line_up_r_y = self.g_up_y/2
self.line_down_r_x = self.p_area_down_x + self.offset_r
self.line_down_r_y = self.g_down_y/2
# up to here: basic wall position calculation
# from here: position correction considering robot movement, and shifting the wall to the near or far side
# distance the robot can move before the ball reaches the wall
tmp = (self.L_a/self.ball_MAX_SPEED - self.delay_time_ms/1000.0)
if tmp > 0:
self.L_G = self.robot_a*(tmp**2)/2.0
else:
self.L_G = 0
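# Illustrative note (not part of the original source): L_G is the distance a
# defender accelerating at robot_a can cover while the ball (at ball_MAX_SPEED)
# travels L_a, minus the processing delay: L_G = robot_a * t**2 / 2 with
# t = L_a / ball_MAX_SPEED - delay_time_ms / 1000. Hypothetical numbers
# L_a = 3.25 m, ball_MAX_SPEED = 6.5 m/s and a 100 ms delay give t = 0.4 s
# and L_G = 0.08 m.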
# ball is on the opponent side of the half line (one wall robot) and within the range one robot can cover: pattern 1
if (ball_x > 0.5) and (((self.line_up_r_y - self.line_down_r_y)**2 + (self.line_up_r_x - self.line_down_r_x)**2) <= 4.0*((self.L_G + self.robot_r)**2)):
def1_pos_y = (self.line_up_r_y + self.line_down_r_y)/2.0
def1_pos_x = (self.line_up_r_x + self.line_down_r_x)/2.0
def2_pos_y = functions.calculate_internal_dividing_point_vector_args(self.ball_params.get_current_position(), config.GOAL_CENTER, 1, 1)[1]
def2_pos_x = functions.calculate_internal_dividing_point_vector_args(self.ball_params.get_current_position(), config.GOAL_CENTER, 1, 1)[0]
# ball is on our side of the half line (two wall robots) and within the range two robots can cover: patterns 2-1, 2-2
elif (ball_x <= 0) and (((self.line_up_r_y - self.line_down_r_y)**2 + (self.line_up_r_x | |
<filename>qtgui/panels/face.py
"""
File: face.py
Author: <NAME>
Email: <EMAIL>
Graphical interface for face detection and recognition.
"""
# pylint --method-naming-style=camelCase --attr-naming-style=camelCase qtgui.panels.face
# standard imports
import logging
# third party imports
import numpy as np
# Qt imports
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtWidgets import (QGroupBox, QWidget, QLabel,
QVBoxLayout, QHBoxLayout, QGridLayout)
from PyQt5.QtGui import QResizeEvent
# toolbox imports
from toolbox import Toolbox
from dltb.base.data import Data
from dltb.base.image import Image, Imagelike
from dltb.tool import Tool
from dltb.tool.face.detector import Detector as FaceDetector
from dltb.tool.worker import Worker
# GUI imports
from ..utils import QObserver, QBusyWidget, QPrepareButton, protect
from ..widgets.image import QImageView, QImageBatchView
from ..widgets.data import QDataInspector
from ..widgets.tools import QToolComboBox
from .panel import Panel
# logging
LOG = logging.getLogger(__name__)
class QDetectorWidget(QGroupBox, QObserver,
qattributes={Toolbox: False}, qobservables={
Worker: {'tool_changed', 'work_finished', 'busy_changed'}}):
"""A detector widget displays the output of a Detector.
_worker: Worker
_view: QImageView
_label: QLabel
_busy: QBusyWidget
_trueMetadata
"""
def __init__(self, detector: FaceDetector = None, **kwargs):
"""Initialization of the FacePanel.
Parameters
----------
decector: FaceDetector
The face detector providing data.
parent: QWidget
The parent argument is sent to the QWidget constructor.
"""
super().__init__(**kwargs)
self._trueMetadata = None
self._initUI()
self._layoutUI()
self.setWorker(Worker(detector))
self.toggled.connect(self.onToggled)
LOG.info("New QDetectorWidget[%s] initialized: detector=%s",
type(self), detector)
def _initUI(self):
"""Initialize the user interface
The user interface contains the following elements:
* the input view: depicting the current input image
* a loop button: allowing to start and stop loop data sources
* an input counter:
* a process counter:
* up to four detector views: depicting faces located in the input image
"""
self._view = QImageView()
self._batchView = QImageBatchView()
self._prepareButton = QPrepareButton()
self._label = QLabel()
self._busy = QBusyWidget()
self._status = QLabel()
self._toolSelector = QToolComboBox()
self.addAttributePropagation(Toolbox, self._toolSelector)
self._toolSelector.toolSelected.connect(self.onToolSelected)
def _layoutUI(self):
layout = QVBoxLayout()
layout.addWidget(self._view)
layout.addWidget(self._label)
layout.addWidget(self._batchView)
layout.addWidget(self._busy)
layout.addStretch(3)
layout.addWidget(self._toolSelector)
layout.addWidget(self._prepareButton)
layout.addWidget(self._status)
self.setLayout(layout)
self.setCheckable(True)
def setWorker(self, worker: Worker) -> None:
"""Set the worker observed by this :py:class:`QDetectorWidget`.
The widget is initialized with its own private
:py:class:`Worker`, so there is usually no reason to call
this method directly.
"""
self._busy.setBusyObservable(worker)
def faceDetector(self) -> FaceDetector:
"""Get the detector currently applied by this
:py:class:`QDetectorWidget`.
Result
------
detector: FaceDetector
The face detector, or `None` if no detector is set.
"""
return self._worker.tool
def setFaceDetector(self, detector: FaceDetector) -> None:
"""Set a new :py:class:`FaceDetector`.
The face detector will inform us whenever new faces were
detected.
"""
LOG.info("setFaceDetector: %s", detector)
if detector is self.faceDetector():
return # Nothing to do
# we want to do timing
if detector is not None:
detector.timer = True
# setting the tool in the worker will indirectly trigger update()
# in the main event loop thread.
self._worker.tool = detector
self._prepareButton.setPreparable(detector)
self._toolSelector.setCurrentTool(detector)
if detector is not None and not detector.busy:
if detector.preparable:
LOG.debug("setFaceDetector: preparing detector")
detector.prepare()
else:
LOG.debug("setFaceDetector: detector is not preparable")
def worker_changed(self, worker: Worker,
change: Worker.Change) -> None:
# pylint: disable=invalid-name
"""React to changes in the observed :py:class:`FaceDetector`.
"""
LOG.debug("QDetectorWidget[%s].worker_changed(chanage=%s): busy=%s",
worker.tool, change, worker.busy)
if change.tool_changed or change.busy_changed:
detector = worker.tool
self.setTitle("None" if detector is None else
(type(detector).__name__ +
(' (busy)' if worker.busy else '')))
if change.tool_changed or change.work_finished:
self.update()
def setData(self, data: Data) -> None:
"""Set a new :py:class:`Data` object to be displayed by this
:py:class:`QDetectorWidget`. The data is expected to be an image.
"""
self.setImage(None if not data else data.array, data)
def setImage(self, image: np.ndarray, data: Data = None):
"""Set the image to be worked on by the underlying detector.
"""
LOG.debug("QDetectorWidget[%s].set_image(data=%s, data=%s)",
self._worker.tool,
None if image is None else image.shape, data)
self._trueMetadata = data
if self._worker.ready:
self._worker.work(data, extract=True)
self.update()
def update(self):
"""Update the display of this :py:class:`QDetectorWidget`.
"""
detector = self._worker.tool
if self._worker.tool is None or not self.isChecked():
self._view.setData(None)
self._batchView.setImages(None)
self._label.setText("No detector." if detector is None else "Off.")
self._status.setText("no detector" if detector is None else "off")
return
data = self._worker.data
detections = detector.detections(data)
LOG.debug("QDetectorWidget[%s].update(): data = %s, detections = %s",
detector, data, detections)
self._view.setData(data)
self._status.setText(f"failed: {detector.failed}, "
f"preparable: {detector.preparable}, "
f"prepared: {detector.prepared}")
if detections is None:
self._label.setText("No detections.")
self._batchView.setImages(None)
return
# FIXME[old/todo]
# self._view.showAnnotations(self._trueMetadata, detections)
self._view.setMetadata(detections)
self._batchView.setImages(detector.extractions(data))
duration = detector.duration(data) or -1.0
if detections.has_regions():
count = len(detections.regions)
self._label.setText(f"{count} face{'s' if count >1 else ''} "
f"detected in {duration:.3f}s")
else:
self._label.setText(f"Nothing detected in {duration:.3f}s")
@pyqtSlot(bool)
@protect
def onToggled(self, _state: bool) -> None:
"""We want to update this :py:class:`QDetectorWidget` when it gets
(de)activated.
"""
self.update()
@pyqtSlot(Tool)
@protect
def onToolSelected(self, tool: Tool) -> None:
"""A slot to be informed if a new Tool is selected.
Arguments
---------
tool: Tool
The `tool` is expected to be a face detector, otherwise
it will be treated as `None`, meaning this
:py:class:`QDetectorWidget` will be deactivated
"""
print("QDetectorWidget.onToolSelected:", tool, type(tool))
if not isinstance(tool, FaceDetector):
LOG.warning("%s is not a FaceDetector.", tool)
tool = None
self.setFaceDetector(tool)
class FacePanel(Panel, QObserver, qobservables={Toolbox: {'input_changed'}}):
# pylint: disable=too-many-instance-attributes
"""The :py:class:`FacePanel` provides access to different
face recognition technologies. This includes
* face detection
* face landmarking
* face alignment
* face recognition
The panel allows to independently select these components (if
possible - some implementations combine individual steps).
The :py:class:`FacePanel` can be assigned an image to process
using the :py:meth:`setImage`. This will trigger the processing
steps, updating the display(s) accordingly. Alternatively, if
a full data object is available, including image data and
metadata like ground truth annotations, this can be set using
the :py:class:`setData` method (which will internally call
:py:class:`setImage`).
A :py:class:`FacePanel` is associated with a :py:class:`Toolbox`.
It will use the toolbox's input, and the `QDataselector` can be
used to change this input.
Face detection
--------------
* Apply face detector to some data source
* Compare multiple face detectors
* Evaluate face detectors
Properties
----------
_toolbox: Toolbox = None
_detectorViews: list = None
_dataView: QDataView = None
_inputCounter: QLabel = None
_processCounter: QLabel = None
_dataInspector: QDataInspector = None
"""
def __init__(self, toolbox: Toolbox = None, **kwargs):
"""Initialization of the FacePanel.
Parameters
----------
toolbox: Toolbox
The toolbox provides input data.
parent: QWidget
The parent argument is sent to the QWidget constructor.
"""
super().__init__(**kwargs)
# name = 'shape_predictor_5_face_landmarks.dat'
name = 'shape_predictor_68_face_landmarks.dat' # FIXME[hack]
self._initUI()
self._layoutUI()
self.setToolbox(toolbox)
self._counter = 0 # FIXME[hack]
def _initUI(self):
"""Initialize the user interface.
The user interface contains the following elements:
* the data selector: depicting the current input image
and allowing to select new inputs from a datasource
* an input counter and a process counter:
* up to four detector views: depicting faces located in
the input image
"""
#
# Input data
#
# QImageView: a widget to display the input data
self._dataInspector = QDataInspector(orientation=Qt.Vertical)
self._dataView = self._dataInspector.dataView()
self._dataView.addAttribute('filename')
self._dataView.addAttribute('basename')
self._dataView.addAttribute('directory')
self._dataView.addAttribute('path')
self._dataView.addAttribute('regions')
self._dataView.addAttribute('image')
self._inputCounter = QLabel("0")
self._processCounter = QLabel("0")
self._detectorViews = []
for detector in range(2):
LOG.info("FacePanel._initUI(): add detector view %s", detector)
self._detectorViews.append(QDetectorWidget())
def _layoutUI(self):
"""Initialize the user interface of this :py:class:`FacePanel`.
"""
# The big picture:
#
# +--------------------+----------------------------------------+
# |+------------------+|+---------------+ +---------------+ ... |
# ||dataInspector |||QDetectorWidget| |QDetectorWidget| |
# ||[view] ||| Result | | Result | |
# || ||| | | | |
# || ||| | | | |
# || ||| | | | |
# || ||| Controls | | Controls | |
# || ||| | | | |
# ||[navigator] ||| | | | |
# || ||| | | | |
# || ||| Selector | | Selector | |
# |+------------------+|+---------------+ +---------------+ ... |
# +--------------------+----------------------------------------+
layout = QHBoxLayout()
layout2 = QVBoxLayout()
layout2.addWidget(self._dataInspector)
row = QHBoxLayout()
row.addWidget(self._processCounter)
row.addWidget(QLabel("/"))
row.addWidget(self._inputCounter)
row.addStretch()
layout2.addLayout(row)
layout2.addStretch(1)
layout.addLayout(layout2)
layout.setStretchFactor(layout2, 1)
grid = QGridLayout()
for i, view in enumerate(self._detectorViews):
grid.addWidget(view, i//2, i % 2)
layout.addLayout(grid)
layout.setStretchFactor(grid, 1)
self.setLayout(layout)
@staticmethod
def _detectorWidget(name: str, widget: QWidget):
layout = QVBoxLayout()
layout.addWidget(widget)
layout.addWidget(QLabel(name))
groupBox = QGroupBox(name)
groupBox.setLayout(layout)
groupBox.setCheckable(True)
return groupBox
def setImage(self, image: Imagelike) -> None:
"""Set the image for this :py:class:`FacePanel`. This
will initiate the processing of this image using the
current tools.
"""
self.setData(Image.as_data(image))
def setData(self, data: Data) -> None:
"""Set the data to be processed by this :py:class:`FacePanel`.
"""
# set data for the dataView - this is redundant if data is set
# from the toolbox (as the dataView also observes the toolbox),
# but it is necessary, if setData is called independently.
self._dataView.setData(data)
# now feed the new data to the detectors
| |
the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS''',
'gnu2': ''' GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause | |
# Repository: anshul96go/toolbox
from qnt.data.common import *
from qnt.data.secgov import load_facts
import itertools
import pandas as pd
import datetime as dt
from qnt.log import log_info, log_err
def load_indicators(
assets,
time_coord,
standard_indicators=None,
builders = None,
start_date_offset = datetime.timedelta(days=365*2),
fill_strategy=lambda xarr: xarr.ffill('time')
):
cik2id = dict((a['cik'], a['id']) for a in assets if a.get('cik') is not None)
min_date = pd.Timestamp(time_coord.min().values).to_pydatetime().date() - parse_tail(start_date_offset)
max_date = pd.Timestamp(time_coord.max().values).to_pydatetime().date()
indicator_dicts = load_indicator_dicts(list(cik2id.keys()), standard_indicators, builders, min_date, max_date)
dfs = []
for (cik, inds) in indicator_dicts:
series = [pd.Series(v if len(v) > 0 else {min_date.isoformat(): np.nan}, dtype=np.float64, name=k)
for (k,v) in inds.items()]
df = pd.concat(series, axis=1)
df.index = df.index.astype(dtype=time_coord.dtype,copy=False)
df = df.unstack().to_xarray().rename({'level_0':'field', 'level_1': 'time'})
df.name = cik2id[cik]
dfs.append(df)
    if len(dfs) == 0:
return None # TODO
idc_arr = xr.concat(dfs, pd.Index([d.name for d in dfs], name='asset'))
idc_arr = xr.align(idc_arr, time_coord, join='outer')[0]
idc_arr = idc_arr.sel(time = np.sort(idc_arr.time.values))
idc_arr = fill_strategy(idc_arr)
idc_arr = idc_arr.sel(time=time_coord)
idc_arr.name = "secgov_indicators"
return idc_arr
secgov_load_indicators = deprecated_wrap(load_indicators)
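# --- Editor's note: a minimal, hedged usage sketch of load_indicators(). The
# asset ids and CIK numbers below are illustrative placeholders, not values
# taken from this module; in real use the assets list and time coordinate come
# from the qnt data helpers. The function is defined but never called here.
def _example_load_indicators():
    import pandas as pd
    import xarray as xr
    # Only the 'cik' and 'id' keys of each asset dict are used by load_indicators().
    assets = [
        {'id': 'NAS:AAPL', 'cik': '0000320193'},
        {'id': 'NAS:MSFT', 'cik': '0000789019'},
    ]
    # A daily time coordinate covering one year.
    time_coord = xr.DataArray(
        pd.date_range('2020-01-01', '2020-12-31', freq='D'),
        dims=['time'], name='time')
    # Returns an xarray.DataArray named "secgov_indicators" with dimensions
    # (asset, field, time), forward-filled along time by the default strategy.
    return load_indicators(assets, time_coord)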
def load_indicator_dicts(ciks, standard_indicators=None, builders=None, min_date=None, max_date=None, tail=DEFAULT_TAIL):
if builders is None:
builders = []
else:
builders = list(builders)
if standard_indicators is None:
builders = builders + standard_indicator_builders
else:
for a in standard_indicators:
for si in standard_indicator_builders:
if si.alias == a:
builders.append(si)
fact_names = [f for b in builders for f in b.facts]
fact_names = set(fact_names)
fact_names = list(fact_names)
for g in load_facts(ciks, fact_names, min_date=min_date, max_date=max_date, skip_segment=True, tail=tail,
columns=['cik', 'report_id', 'report_type', 'report_date', 'fact_name', 'period', 'period_length'],
group_by_cik=True):
indicators = dict()
for b in builders:
data = [d for d in g[1] if d['fact_name'] in b.facts]
indicators[b.alias] = b.build_series_dict(data)
yield (g[0], indicators)
secgov_load_indicator_dicts = deprecated_wrap(load_indicator_dicts)
class IndicatorBuilder:
facts = None
alias = None
use_report_date = None
sort_key = None
group_key = None
def __init__(self, alias, facts, use_report_date):
self.facts = facts
self.alias = alias
self.use_report_date = use_report_date
        if self.use_report_date:
self.sort_key = lambda f: (f['report_date'], f['period'], f['report_id'], -self.facts.index(f['fact_name']))
else:
self.sort_key = lambda f: (f['period'], f['report_date'], f['report_id'], -self.facts.index(f['fact_name']))
def build_series_dict(self, fact_data):
pass
class InstantIndicatorBuilder(IndicatorBuilder):
def __init__(self, alias, facts, use_report_date):
super().__init__(alias, facts, use_report_date)
self.group_key=(lambda f: f['report_date']) if self.use_report_date else (lambda f: f['period'])
def build_series_dict(self, fact_data):
fact_data = sorted(fact_data, key=self.sort_key, reverse=True)
groups = itertools.groupby(fact_data,self.group_key)
return dict((g[0], next(g[1])['value']) for g in groups)
class SimplePeriodIndicatorBuilder(IndicatorBuilder):
periods = None
"""
qf, representing quarterly values
af, representing annual values
saf, representing semi-annual values
"""
def __init__(self, alias, facts, use_report_date, periods):
super().__init__(alias, facts, use_report_date)
self.periods = periods
self.group_key=(lambda f: f['report_date']) if self.use_report_date else (lambda f: f['period'][1])
def build_series_dict(self, fact_data):
fact_data = sorted(fact_data, key=self.sort_key, reverse=True)
# TODO restore missed semi-annual facts
# TODO restore missed quarter facts
# TODO ltm
if self.periods == 'qf':
fact_data = [f for f in fact_data if 80 < f['period_length'] < 100]
elif self.periods == 'saf':
fact_data = [f for f in fact_data if 170 < f['period_length'] < 190]
elif self.periods == 'af':
fact_data = [f for f in fact_data if 355 < f['period_length'] < 375]
groups = itertools.groupby(fact_data,self.group_key)
return dict((g[0] , next(g[1])['value']) for g in groups)
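# --- Editor's note (hedged illustration) ---
# A builder maps one or more XBRL fact names to a named indicator. A quarterly
# builder might be declared roughly as follows; the fact name is an assumption
# made for illustration and is not taken from this module:
#   revenues_qf = SimplePeriodIndicatorBuilder(
#       'revenues', ['us-gaap:Revenues'], use_report_date=True, periods='qf')
# build_series_dict() then returns a plain dict keyed by date, for example
# {'2020-03-31': 1.2e9, '2020-06-30': 1.3e9}.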
class PeriodIndicatorBuilder(IndicatorBuilder):
periods = None
"""
qf, representing quarterly values
af, representing annual values
ltm, representing LTM (last twelve months) values
"""
def __init__(self, alias, facts, use_report_date, periods):
super().__init__(alias, facts, use_report_date)
self.periods = periods
if self.use_report_date:
self.sort_key = lambda f: (f['report_date'], f['period'], f['report_id'], -self.facts.index(f['fact_name']))
else:
self.sort_key = lambda f: (f['period'], f['report_date'], f['report_id'], -self.facts.index(f['fact_name']))
self.group_key = (lambda f: f['report_date']) if self.use_report_date else (lambda f: f['period'][1])
def build_series_dict(self, fact_data):
fact_data = sorted(fact_data, key=self.sort_key, reverse=True)
if self.periods == 'ltm':
result = self.build_ltm(fact_data)
return dict((item[1].date().isoformat(), item[0]) for item in reversed(result))
elif self.periods == 'qf':
result = self.build_series_qf(fact_data)
return dict((item[1] , item[0]) for item in reversed(result))
elif self.periods == 'af':
fact_data = [f for f in fact_data if 340 < f['period_length'] < 380]
groups = itertools.groupby(fact_data,self.group_key)
return dict((g[0] , next(g[1])['value']) for g in groups)
def build_series_qf(self, fact_data):
# from the earliest reports to new ones
fact_data = sorted(fact_data, key=self.sort_key)
result = []
all_info = []
# For each report...
groups = itertools.groupby(fact_data,self.group_key)
for g in groups:
local_facts = list(g[1])
#identify the report type
Q_report = False
K_report = False
q_indexis = []
k_indexis = []
            # Build the all_info list and find indices of quarterly and annual facts
for i, f in enumerate(local_facts):
if f['value'] is not None:
all_info.append([f['period'],f['value']])
if f['period_length'] is not None:
if (75 < f['period_length'] < 120): q_indexis.append(i)
if (340 < f['period_length'] < 380): k_indexis.append(i)
if f['report_type'] in ['10-Q','10-Q/A']: Q_report = True
if f['report_type'] in ['10-K','10-K/A']: K_report = True
# Quarter info only
if Q_report and (len(q_indexis) > 0) and not K_report:
result.append([local_facts[q_indexis[-1]]['value'],g[0]])
# Annual report but all periods are quarters
elif K_report and (len(k_indexis)) == 0 and (len(q_indexis) > 0) and not Q_report:
result.append([local_facts[q_indexis[-1]]['value'],g[0]])
            # Both reports at the same report date - take the most recent info
elif Q_report and K_report and (len(k_indexis)) > 0 and (len(q_indexis) > 0):
last_k_date = dt.datetime.strptime(local_facts[k_indexis[-1]]['period'][1], '%Y-%m-%d')
last_q_date = dt.datetime.strptime(local_facts[q_indexis[-1]]['period'][1], '%Y-%m-%d')
if last_q_date > last_k_date:
result.append([local_facts[q_indexis[-1]]['value'],g[0]])
else:
result.append([local_facts[k_indexis[-1]]['value'],g[0]])
# Mixed info
elif K_report and (len(k_indexis)) > 0 and (len(q_indexis) > 0) and not Q_report:
last_q_date = dt.datetime.strptime(local_facts[q_indexis[-1]]['period'][1], '%Y-%m-%d')
last_k_date = dt.datetime.strptime(local_facts[k_indexis[-1]]['period'][1], '%Y-%m-%d')
first_k_date = dt.datetime.strptime(local_facts[k_indexis[-1]]['period'][0], '%Y-%m-%d')
                # It may contain 4th-quarter info separately
if (last_k_date - dt.timedelta(days = 5)) < last_q_date < (last_k_date + dt.timedelta(days = 5)):
result.append([local_facts[q_indexis[-1]]['value'],g[0]])
                # If not, one can extract it from the other periods
else:
local_value = local_facts[k_indexis[-1]]['value']
if local_value is None:
temp = np.nan
else:
temp = previous_3_quarters(all_info, first_k_date, local_facts[k_indexis[-1]]['value'])
result.append([temp,g[0]])
# Annual info only
elif K_report and (len(k_indexis)) > 0 and (len(q_indexis) == 0) and not Q_report:
first_k_date = dt.datetime.strptime(local_facts[k_indexis[-1]]['period'][0], '%Y-%m-%d')
local_value = local_facts[k_indexis[-1]]['value']
if local_value is None:
temp = np.nan
else:
temp = previous_3_quarters(all_info, first_k_date, local_facts[k_indexis[-1]]['value'])
result.append([temp,g[0]])
#All other cases
elif (K_report or Q_report) and len(local_facts) > 0:
if local_facts[-1]['value'] is not None \
and local_facts[-1]['period_length'] is not None \
and local_facts[-1]['period_length'] > 0:
temp = local_facts[-1]['value']/local_facts[-1]['period_length']*90
result.append([temp,g[0]])
            # We have tried everything; fall back to NaN for this report date
else:
result.append([np.nan,g[0]])
return result
def build_ltm(self, fact_data):
# averaging period
avg_time_frame = 360
sort_type = lambda f: (f[1])
data_list = self.build_series_qf(fact_data)
#check data
if len(data_list) == 0:
return []
annual_value_list = []
annual_date_list = []
result = []
#sort data
data_list = sorted(data_list,key=sort_type)
        # the last date for which LTM values are computed
end_date = dt.datetime.strptime(data_list[-1][1], '%Y-%m-%d')
        # add new events to the data: the end of each item's information shelf life
add_list = []
for item in data_list:
loop_date = dt.datetime.strptime(item[1], '%Y-%m-%d') + dt.timedelta(days = 365)
add_list.append([0,loop_date.strftime('%Y-%m-%d')])
data_list = data_list + add_list
data_list = sorted(data_list,key=sort_type)
        # for each event in the data list...
for item in data_list:
loop_date = dt.datetime.strptime(item[1], '%Y-%m-%d')
if (len(annual_value_list)==0) or (len(annual_date_list)==0):
start_date = loop_date
annual_value_list = []
annual_date_list = []
dist = (loop_date - start_date).days
            # In case of weak data, we will not create synthetic values
if (end_date - loop_date).days < 0: break
# If less than year -> take into account new data
if dist < avg_time_frame:
if (item[0] is not None):
if (item[0] != 0) and (~np.isnan(item[0])):
annual_value_list.append(item[0])
annual_date_list.append(loop_date)
            # otherwise -> save the result and drop the oldest entry
else:
                # A company might file a lot of reports, so there may be some overlaps.
                # But there are only 4 quarters per year anyway.
local_value = np.nansum(annual_value_list)/len(annual_value_list)*4
result.append([local_value,loop_date])
if (item[0] is not None):
if (item[0] != 0) and (~np.isnan(item[0])):
annual_value_list.append(item[0])
annual_date_list.append(loop_date)
annual_value_list.pop(0)
annual_date_list.pop(0)
if len(annual_date_list) > 0:
start_date = annual_date_list[0]
else:
start_date = 0
return result
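# --- Editor's note: the annualization step in build_ltm(), worked through. ---
# Within the averaging window the LTM value is the mean of the collected
# quarterly values scaled to four quarters: np.nansum(vals) / len(vals) * 4.
# For example, three available quarters [10, 12, 11] give (10 + 12 + 11) / 3 * 4 = 44.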
def previous_3_quarters(full_list, start_time, val):
ind1 = 0
ind2 = 0
ind3 = 0
ind12 = 0
ind23 = 0
local_index = []
# Searching for available timeframes
for i, info in enumerate(full_list):
left_bound = dt.datetime.strptime(info[0][0], '%Y-%m-%d')
right_bound = dt.datetime.strptime(info[0][1], '%Y-%m-%d')
left_index1 = (left_bound - dt.timedelta(days = 10)) < start_time < (left_bound + dt.timedelta(days = 10))
left_index2 = (left_bound - dt.timedelta(days = 110)) < start_time < (left_bound - dt.timedelta(days = 70))
left_index3 = (left_bound - dt.timedelta(days = 210)) < start_time < (left_bound - dt.timedelta(days = 150))
if left_index1:
dist = (right_bound - left_bound).days
            if 80 < dist < 120:
                local_index.extend([info[1], '1']) # first quarter
            elif 150 < dist < 200:
                local_index.extend([info[1], '12']) # first and second quarters
            elif 250 < dist < 290:
                local_index.extend([info[1], '123']) # first, second and third quarters -> exit
return info[1]
if | |
nitems = m_%s.receive(data, minReturned, maxReturned);\n" % xactor_name)
f_out.write (" if (nitems == 0)\n")
f_out.write (" return nitems;\n\n")
f_out.write ("%s\n" % resizeparams)
f_out.write (" for (int i=0; i<nitems; i++) {\n")
for verilog_name in xactor.verilog_names:
f_out.write (" %s[i] = data[i].m_field_%s;\n" % (verilog_name, verilog_name))
f_out.write (" }\n")
f_out.write (" return nitems;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("\n")
def generateOutputRecvMethods(f_out, prefix, emu_type, new_module_name, xactor_name, xactor):
typ = "BitT<%d>" % xactor.field_width
f_out.write ("bool %sXactor::%sreceive_%s(%s &%s_data)\n" % (new_module_name, prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" %s_%s data;\n" % (new_module_name, xactor_name))
f_out.write (" bool gotone = m_%s.receiveNB(data);\n" % xactor_name)
f_out.write (" if (gotone) {\n")
f_out.write (" %s_data = data;\n" % xactor_name)
f_out.write (" }\n")
f_out.write (" return gotone;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("bool %sXactor::%sreceiveB_%s(%s &%s_data)\n" % (new_module_name, prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" %s_%s data;\n" % (new_module_name, xactor_name))
f_out.write (" m_%s.receive(data);\n" % xactor_name)
f_out.write (" %s_data = data;\n" % xactor_name)
f_out.write (" return true;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("bool %sXactor::%sset_emulation_type_%s(BitT<1> &t)\n" % (new_module_name, prefix, xactor_name))
f_out.write ("{\n")
    f_out.write ("  m_%s_ctrl.sendAcknowledge(t);\n" % xactor_name)
f_out.write (" return true;\n")
f_out.write ("}\n")
f_out.write ("\n")
def generateOutputPipeRecvMethods(f_out, prefix, emu_type, new_module_name, xactor_name, xactor):
typ = "BitT<%d>" % xactor.field_width
f_out.write ("bool %sXactor::%sreceive_%s(%s &%s_data)\n" % (new_module_name, prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" bool gotone = m_%s.receiveNB(%s_data);\n" % (xactor_name, xactor_name))
f_out.write (" return gotone;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("bool %sXactor::%sreceiveB_%s(%s &%s_data)\n" % (new_module_name, prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" m_%s.receive(%s_data);\n" % (xactor_name, xactor_name))
f_out.write (" return true;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("unsigned %sXactor::%svector_receive_%s(std::vector<%s > &%s_data" % (new_module_name, prefix, xactor_name, typ, xactor_name))
f_out.write (", unsigned minReturned, unsigned maxReturned)\n")
f_out.write ("{\n")
f_out.write (" unsigned nitems = m_%s.receive(%s_data, minReturned, maxReturned);\n" % (xactor_name, xactor_name))
f_out.write (" return nitems;\n")
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("bool %sXactor::%sset_emulation_type_%s(BitT<1> &t)\n" % (new_module_name, prefix, xactor_name))
f_out.write ("{\n")
f_out.write (" m_%s_ctrl.sendAcknowledge(t);\n" % xactor_name)
f_out.write (" return true;\n")
f_out.write ("}\n")
f_out.write ("\n")
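# --- Editor's note (hedged illustration of the emitted C++) ---
# For a hypothetical module "mkDut" with an output pipe transactor named
# "result" of field_width 32, generateOutputPipeRecvMethods() above would
# emit C++ along these lines (the names and width are assumptions):
#
#   bool mkDutXactor::receive_result(BitT<32> &result_data)
#   {
#     bool gotone = m_result.receiveNB(result_data);
#     return gotone;
#   }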
def genDutXactorImpl (output_dir, dut_ifc, new_module_name):
impl_filename = os.path.join (output_dir, "%sXactor.cpp" % new_module_name)
try:
f_impl = open (impl_filename, "w")
except:
print "Error opening file: ", impl_filename
return 1
print "Generating file: ", impl_filename
genDutXactorImplFile (f_impl, dut_ifc, new_module_name)
f_impl.close()
return 0
def genDutXactorImplFile (f_out, dut_ifc, new_module_name):
f_out.write ("// Copyright Bluespec Inc. 2012-2013\n")
f_out.write ("// By: GenTestbench tool\n\n")
f_out.write ("#include <iostream>\n")
f_out.write ("#include \"%sXactor.h\"\n" % new_module_name)
f_out.write ("\n")
f_out.write ("using namespace std;\n")
f_out.write ("\n")
f_out.write ("%sXactor *%sXactor::m_xactor = NULL;\n\n" % (new_module_name, new_module_name))
f_out.write ("%sXactor *%sXactor::init(SceMi *scemi)\n" % (new_module_name, new_module_name))
f_out.write ("{\n")
f_out.write (" if (m_xactor != NULL)\n")
f_out.write (" return m_xactor;\n\n")
f_out.write (" m_xactor = new %sXactor(scemi);\n\n" % new_module_name)
f_out.write (" return m_xactor;\n")
f_out.write ("}\n\n")
f_out.write ("void %sXactor::destroy()\n" % new_module_name)
f_out.write ("{\n")
f_out.write (" delete m_xactor;\n")
f_out.write (" m_xactor = NULL;\n")
f_out.write ("}\n\n")
f_out.write ("%sXactor::%sXactor(SceMi *scemi)\n" % (new_module_name, new_module_name))
f_out.write (" : DutXactor(scemi)\n")
# **************************
    # Constructor initialization
# **************************
first = 1
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_PUT_IFC):
f_out.write (" , m_%s (\"\", \"scemi_put_%s_inport\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
elif isinstance (xactor, Xactor_PIPEPUT_IFC):
f_out.write (" , m_%s (\"\", \"scemi_put_%s_inpipe\", XactorAdapter::InPipe)\n" % (xactor_name, xactor_name))
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_GET_IFC):
f_out.write (" , m_%s (\"\", \"scemi_get_%s_outport\", XactorAdapter::OutPort)\n" % (xactor_name, xactor_name))
elif isinstance (xactor, Xactor_PIPEGET_IFC):
f_out.write (" , m_%s (\"\", \"scemi_get_%s_outpipe\", XactorAdapter::OutPipe)\n" % (xactor_name, xactor_name))
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_Raw_In_IFC):
f_out.write (" , m_%s (\"\", \"scemi_put_%s_inport\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
f_out.write (" , m_%s_ctrl (\"\", \"scemi_put_%s_ctrl_in\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
elif isinstance (xactor, Xactor_PIPE_IN_IFC):
f_out.write (" , m_%s (\"\", \"scemi_put_%s_inpipe\", XactorAdapter::InPipe)\n" % (xactor_name, xactor_name))
f_out.write (" , m_%s_ctrl (\"\", \"scemi_put_%s_ctrl_in\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_Raw_Out_IFC):
f_out.write (" , m_%s (\"\", \"scemi_get_%s_outport\", XactorAdapter::OutPort)\n" % (xactor_name, xactor_name))
f_out.write (" , m_%s_ctrl (\"\", \"scemi_get_%s_ctrl_in\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
elif isinstance (xactor, Xactor_PIPE_OUT_IFC):
f_out.write (" , m_%s (\"\", \"scemi_get_%s_outpipe\", XactorAdapter::OutPipe)\n" % (xactor_name, xactor_name))
f_out.write (" , m_%s_ctrl (\"\", \"scemi_get_%s_ctrl_in\", XactorAdapter::InPort)\n" % (xactor_name, xactor_name))
f_out.write ("{\n")
f_out.write ("}\n")
f_out.write ("\n")
# **************************
# Destructor
# **************************
f_out.write ("%sXactor::~%sXactor()\n" % (new_module_name, new_module_name))
f_out.write ("{\n")
f_out.write ("}\n")
f_out.write ("\n")
# ******
# send
# ******
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_PUT_IFC) or isinstance (xactor, Xactor_PIPEPUT_IFC):
generatePutSendMethods (f_out, "", new_module_name, xactor_name, xactor)
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_Raw_In_IFC) or isinstance (xactor, Xactor_PIPE_IN_IFC):
generateInputSendMethods(f_out, "", 0, new_module_name, xactor_name, xactor)
# *****
# get
# *****
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_GET_IFC) or isinstance (xactor, Xactor_PIPEGET_IFC):
generateGetRecvMethods (f_out, "", new_module_name, xactor_name, xactor)
for xactor_name in dut_ifc.xactors:
xactor = dut_ifc.xactors [xactor_name]
if isinstance (xactor, Xactor_Raw_Out_IFC):
generateOutputRecvMethods(f_out, "", 0, new_module_name, xactor_name, xactor)
elif isinstance (xactor, Xactor_PIPE_OUT_IFC):
generateOutputPipeRecvMethods(f_out, "", 0, new_module_name, xactor_name, xactor)
def generateInlinePutSend (f_out, prefix, new_module_name, xactor_name, xactor):
params = ""
params2 = ""
pipeparams = ""
first = 1
for j in range (len (xactor.verilog_names)):
if (first == 1):
first = 0
firstvector = xactor.verilog_names[j]
else:
params += ", "
params2 += ", "
pipeparams += ", "
params += "BitT<%d> &%s" % (xactor.field_widths[j], xactor.verilog_names[j])
params2 += xactor.verilog_names[j]
pipeparams += "std::vector<BitT<%d> > &%s" % (xactor.field_widths[j], xactor.verilog_names[j])
f_out.write ("inline bool %sput_%s(%s)\n" % (prefix, xactor_name, params))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->put_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %sputB_%s(%s)\n" % (prefix, xactor_name, params))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->putB_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
if isinstance (xactor, Xactor_PIPEPUT_IFC):
f_out.write ("inline bool %svector_put_%s(%s)\n" % (prefix, xactor_name, pipeparams))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_put_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %svector_putB_%s(%s)\n" % (prefix, xactor_name, pipeparams))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_putB_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %svector_putAck_%s(%s)\n" % (prefix, xactor_name, pipeparams))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_putAck_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
def generateInlineInputSend (f_out, prefix, new_module_name, xactor_name, xactor):
typ = "BitT<%d>" % xactor.field_width
f_out.write ("inline bool %ssend_%s(%s &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->send_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %ssendB_%s(%s &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->sendB_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %ssendBAck_%s(%s &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->sendBAck_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
if isinstance (xactor, Xactor_PIPE_IN_IFC):
f_out.write ("inline bool %svector_send_%s(std::vector<%s > &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_send_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %svector_sendB_%s(std::vector<%s > &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_sendB_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %svector_sendAck_%s(std::vector<%s > &%s_data)\n" % (prefix, xactor_name, typ, xactor_name))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->vector_sendAck_%s(%s_data);\n" % (new_module_name, xactor_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %sset_emulation_type_%s(EmulationPortType t)\n" % (prefix, xactor_name))
f_out.write ("{\n")
f_out.write (" BitT<1> data = t;\n")
f_out.write (" return %sXactor::get()->set_emulation_type_%s(data);\n" % (new_module_name, xactor_name))
f_out.write ("}\n")
f_out.write ("\n")
def generateInlineGetRecv (f_out, prefix, new_module_name, xactor_name, xactor):
params = ""
params2 = ""
pipeparams = ""
first = 1
for j in range (len (xactor.verilog_names)):
if (first == 1):
first = 0
else:
params += ", "
params2 += ", "
pipeparams += ", "
params += "BitT<%s> &%s" % (xactor.field_widths[j], xactor.verilog_names[j])
params2 += xactor.verilog_names[j]
pipeparams += "std::vector<BitT<%s> > &%s" % (xactor.field_widths[j], xactor.verilog_names[j])
f_out.write ("inline bool %sget_%s(%s)\n" % (prefix, xactor_name, params))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->get_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
f_out.write ("inline bool %sgetB_%s(%s)\n" % (prefix, xactor_name, params))
f_out.write ("{\n")
f_out.write (" return %sXactor::get()->getB_%s(%s);\n" % (new_module_name, xactor_name, params2))
f_out.write ("}\n")
f_out.write ("\n")
if isinstance (xactor, Xactor_PIPEGET_IFC):
f_out.write ("inline unsigned %svector_get_%s(%s" % (prefix, xactor_name, pipeparams))
f_out.write (", unsigned minReturned, unsigned maxReturned)\n")
f_out.write ("{\n")
f_out.write (" unsigned nitems;\n\n")
f_out.write (" nitems = %sXactor::get()->vector_get_%s(%s, minReturned, maxReturned);\n" % (new_module_name, xactor_name, params2))
f_out.write (" return nitems;\n")
| |
in the database.
:param str messageid: Id of the message to search. Note that messageid
is a string with the format msg-\d{1,3}.
:return: True if the message is in the database. False otherwise.
'''
return self.get_message(messageid) is not None
def get_message_time(self, messageid):
'''
Get the time when the message was sent.
:param str messageid: Id of the message to search. Note that messageid
is a string with the format msg-\d{1,3}.
:return: message time as a string or None if that message does not
exist.
:raises ValueError: if messageId is not well formed
'''
raise NotImplementedError("")
#ACCESSING THE USER and USER_PROFILE tables
def get_users(self):
'''
Extracts all users in the database.
:return: list of Users of the database. Each user is a dictionary
that contains two keys: ``nickname``(str) and ``registrationdate``
(long representing UNIX timestamp). None is returned if the database
has no users.
'''
#Create the SQL Statements
#SQL Statement for retrieving the users
query = 'SELECT users.*, users_profile.* FROM users, users_profile \
WHERE users.user_id = users_profile.user_id'
#Activate foreign key support
self.set_foreign_keys_support()
#Create the cursor
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute main SQL Statement
cur.execute(query)
#Process the results
rows = cur.fetchall()
if rows is None:
return None
#Process the response.
users = []
for row in rows:
users.append(self._create_user_list_object(row))
return users
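    # --- Editor's note (hedged) ---
    # Per the docstring above, each element of the returned list looks like
    # {'nickname': 'AxelW', 'registrationdate': 1362015937}; the values shown
    # here are purely illustrative.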
def get_user(self, nickname):
'''
Extracts all the information of a user.
:param str nickname: The nickname of the user to search for.
:return: dictionary with the format provided in the method:
:py:meth:`_create_user_object`
'''
#Create the SQL Statements
#SQL Statement for retrieving the user given a nickname
query1 = 'SELECT user_id from users WHERE nickname = ?'
#SQL Statement for retrieving the user information
query2 = 'SELECT users.*, users_profile.* FROM users, users_profile \
WHERE users.user_id = ? \
AND users_profile.user_id = users.user_id'
#Variable to be used in the second query.
user_id = None
#Activate foreign key support
self.set_foreign_keys_support()
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute SQL Statement to retrieve the id given a nickname
pvalue = (nickname,)
cur.execute(query1, pvalue)
#Extract the user id
row = cur.fetchone()
if row is None:
return None
user_id = row["user_id"]
        # Execute the SQL Statement to retrieve the user information.
        # Create the values first
pvalue = (user_id, )
#execute the statement
cur.execute(query2, pvalue)
        #Process the response. Only one possible row is expected.
row = cur.fetchone()
return self._create_user_object(row)
def delete_user(self, nickname):
'''
Remove all user information of the user with the nickname passed in as
argument.
:param str nickname: The nickname of the user to remove.
:return: True if the user is deleted, False otherwise.
'''
#Create the SQL Statements
#SQL Statement for deleting the user information
query = 'DELETE FROM users WHERE nickname = ?'
#Activate foreign key support
self.set_foreign_keys_support()
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute the statement to delete
pvalue = (nickname,)
cur.execute(query, pvalue)
self.con.commit()
#Check that it has been deleted
if cur.rowcount < 1:
return False
return True
def modify_user(self, nickname, user):
'''
Modify the information of a user.
:param str nickname: The nickname of the user to modify
:param dict user: a dictionary with the information to be modified. The
dictionary has the following structure:
.. code-block:: javascript
{'public_profile':{'registrationdate':,'signature':'',
'avatar':''},
'restricted_profile':{'firstname':'','lastname':'',
'email':'', 'website':'','mobile':'',
'skype':'','age':'','residence':'',
'gender':'', 'picture':''}
}
where:
* ``registrationdate``: UNIX timestamp when the user registered
in the system (long integer)
* ``signature``: text chosen by the user for signature
* ``avatar``: name of the image file used as avatar
            * ``firstname``: given name of the user
* ``lastname``: family name of the user
* ``email``: current email of the user.
* ``website``: url with the user's personal page. Can be None
* ``mobile``: string showing the user's phone number. Can be
None.
* ``skype``: user's nickname in skype. Can be None.
* ``residence``: complete user's home address.
* ``picture``: file which contains an image of the user.
* ``gender``: User's gender ('male' or 'female').
* ``age``: integer containing the age of the user.
        Note that all values are strings unless otherwise indicated.
:return: the nickname of the modified user or None if the
``nickname`` passed as parameter is not in the database.
:raise ValueError: if the user argument is not well formed.
'''
#Create the SQL Statements
#SQL Statement for extracting the userid given a nickname
query1 = 'SELECT user_id from users WHERE nickname = ?'
#SQL Statement to update the user_profile table
query2 = 'UPDATE users_profile SET firstname = ?,lastname = ?, \
email = ?,website = ?, \
picture = ?,mobile = ?, \
skype = ?,age = ?,residence = ?, \
gender = ?,signature = ?,avatar = ?\
WHERE user_id = ?'
        #temporary variables
user_id = None
p_profile = user['public_profile']
r_profile = user['restricted_profile']
_firstname = r_profile.get('firstname', None)
_lastname = r_profile.get('lastname', None)
_email = r_profile.get('email', None)
_website = r_profile.get('website', None)
_picture = r_profile.get('picture', None)
_mobile = r_profile.get('mobile', None)
_skype = r_profile.get('skype', None)
_age = r_profile.get('age', None)
_residence = r_profile.get('residence', None)
_gender = r_profile.get('gender', None)
_signature = p_profile.get('signature', None)
_avatar = p_profile.get('avatar', None)
#Activate foreign key support
self.set_foreign_keys_support()
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
cur = self.con.cursor()
#Execute the statement to extract the id associated to a nickname
pvalue = (nickname,)
cur.execute(query1, pvalue)
#Only one value expected
row = cur.fetchone()
        #if the user does not exist, return None
if row is None:
return None
else:
user_id = row["user_id"]
#execute the main statement
pvalue = (_firstname, _lastname, _email, _website, _picture,
_mobile, _skype, _age, _residence, _gender,
_signature, _avatar, user_id)
cur.execute(query2, pvalue)
self.con.commit()
        #Check that the user has been modified
if cur.rowcount < 1:
return None
return nickname
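    # --- Editor's note: a hedged usage sketch of modify_user(). All names and
    # values below are hypothetical and only illustrate the expected layout. ---
    #   new_profile = {
    #       'public_profile': {'signature': 'Hello there', 'avatar': 'ava.png'},
    #       'restricted_profile': {'firstname': 'Ada', 'lastname': 'Lovelace',
    #                              'email': 'ada@example.com', 'website': None,
    #                              'mobile': None, 'skype': None, 'age': '36',
    #                              'residence': 'London, UK', 'gender': 'female',
    #                              'picture': 'ada.jpg'}
    #   }
    #   connection.modify_user('AxelW', new_profile)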
def append_user(self, nickname, user):
'''
Create a new user in the database.
        :param str nickname: The nickname of the user to create
        :param dict user: a dictionary with the user's information. The
dictionary has the following structure:
.. code-block:: javascript
{'public_profile':{'registrationdate':,'signature':'',
'avatar':''},
'restricted_profile':{'firstname':'','lastname':'',
'email':'', 'website':'','mobile':'',
'skype':'','age':'','residence':'',
'gender':'', 'picture':''}
}
where:
* ``registrationdate``: UNIX timestamp when the user registered
in the system (long integer)
* ``signature``: text chosen by the user for signature
* ``avatar``: name of the image file used as avatar
            * ``firstname``: given name of the user
* ``lastname``: family name of the user
* ``email``: current email of the user.
* ``website``: url with the user's personal page. Can be None
* ``mobile``: string showing the user's phone number. Can be
None.
* ``skype``: user's nickname in skype. Can be None.
* ``residence``: complete user's home address.
* ``picture``: file which contains an image of the user.
* ``gender``: User's gender ('male' or 'female').
* ``age``: integer containing the age of the user.
        Note that all values are strings unless otherwise indicated.
        :return: the nickname of the new user or None if a user with the
            given ``nickname`` already exists in the database.
:raise ValueError: if the user argument is not well formed.
'''
#Create the SQL Statements
#SQL Statement for extracting the userid given a nickname
query1 = 'SELECT user_id from users WHERE nickname = ?'
#SQL Statement to create the row in users table
query2 = 'INSERT INTO users(nickname,regDate,lastLogin,timesviewed)\
VALUES(?,?,?,?)'
#SQL Statement to create the row in user_profile table
query3 = 'INSERT INTO users_profile (user_id, firstname,lastname, \
email,website, \
picture,mobile, \
skype,age,residence, \
gender,signature,avatar)\
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)'
        #temporary variables for the users table
#timestamp will be used for lastlogin and regDate.
timestamp = time.mktime(datetime.now().timetuple())
timesviewed = 0
        #temporary variables for the user profiles
p_profile = user['public_profile']
r_profile = user['restricted_profile']
_firstname = r_profile.get('firstname', None)
_lastname = r_profile.get('lastname', None)
_email = r_profile.get('email', None)
_website = r_profile.get('website', None)
_picture = r_profile.get('picture', None)
_mobile = r_profile.get('mobile', None)
_skype = r_profile.get('skype', None)
_age = r_profile.get('age', None)
_residence = r_profile.get('residence', None)
_gender = r_profile.get('gender', None)
_signature = p_profile.get('signature', None)
_avatar = p_profile.get('avatar', None)
#Activate foreign key support
self.set_foreign_keys_support()
#Cursor and row initialization
self.con.row_factory = sqlite3.Row
| |
import sys, inspect, copy
import numpy as np
from collections import OrderedDict
from ..data.mfstructure import DatumType
from ..data import mfstructure, mfdatautil, mfdata
from ..data.mfdatautil import MultiList
from ..mfbase import ExtFileAction, MFDataException
from ..utils.mfenums import DiscretizationType
class MFArray(mfdata.MFMultiDimVar):
"""
Provides an interface for the user to access and update MODFLOW array data.
Parameters
----------
sim_data : MFSimulationData
data contained in the simulation
structure : MFDataStructure
describes the structure of the data
data : list or ndarray
actual data
enable : bool
enable/disable the array
path : tuple
path in the data dictionary to this MFArray
dimensions : MFDataDimensions
dimension information related to the model, package, and array
Methods
-------
new_simulation : (sim_data : MFSimulationData)
initialize MFArray object for a new simulation
supports_layered : bool
Returns whether this MFArray supports layered data
set_layered_data : (layered_data : bool)
Sets whether this MFArray supports layered data
store_as_external_file : (external_file_path : string, multiplier : float,
layer_num : int)
Stores data from layer "layer_num" to an external file at
"external_file_path" with a multiplier "multiplier". For unlayered
data do not pass in "layer".
store_as_internal_array : (multiplier : float, layer_num : int)
Stores data from layer "layer_num" internally within the MODFLOW file
with a multiplier "multiplier". For unlayered data do not pass in
"layer".
has_data : (layer_num : int) : bool
Returns whether layer "layer_num" has any data associated with it.
For unlayered data do not pass in "layer".
get_data : (layer_num : int) : ndarray
Returns the data associated with layer "layer_num". If "layer_num" is
None, returns all data.
set_data : (data : ndarray/list, multiplier : float, layer_num : int)
Sets the contents of the data at layer "layer_num" to "data" with
multiplier "multiplier". For unlayered
data do not pass in "layer_num". data can have the following formats:
1) ndarray - numpy ndarray containing all of the data
2) [data] - python list containing all of the data
3) val - a single constant value to be used for all of the data
4) {'filename':filename, 'factor':fct, 'iprn':print, 'data':data} -
dictionary defining external file information
5) {'data':data, 'factor':fct, 'iprn':print) - dictionary defining
internal information. Data that is layered can also be set by defining
a list with a length equal to the number of layers in the model.
Each layer in the list contains the data as defined in the
formats above:
[layer_1_val, [layer_2_array_vals],
{'filename':file_with_layer_3_data, 'factor':fct, 'iprn':print}]
load : (first_line : string, file_handle : file descriptor,
block_header : MFBlockHeader, pre_data_comments : MFComment) :
tuple (bool, string)
Loads data from first_line (the first line of data) and open file
file_handle which is pointing to the second line of data. Returns a
tuple with the first item indicating whether all data was read and
the second item being the last line of text read from the file.
get_file_entry : (layer : int) : string
Returns a string containing the data in layer "layer". For unlayered
data do not pass in "layer".
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(self, sim_data, structure, data=None, enable=True, path=None,
dimensions=None):
super(MFArray, self).__init__(sim_data, structure, enable, path,
dimensions)
if self.structure.layered:
try:
self._layer_shape = self.layer_shape()
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'resolving layer dimensions',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
else:
self._layer_shape = (1,)
if self._layer_shape[0] is None:
self._layer_shape = (1,)
self._data_type = structure.data_item_structures[0].type
try:
shp_ml = MultiList(shape=self._layer_shape)
self._data_storage = self._new_storage(shp_ml.get_total_size()
!= 1)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(structure.get_model(),
structure.get_package(), path,
'creating storage', structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, None,
sim_data.debug, ex)
self._last_line_info = []
if self.structure.type == DatumType.integer:
multiplier = [1]
else:
multiplier = [1.0]
if data is not None:
try:
self._get_storage_obj().set_data(data, key=self._current_key,
multiplier=multiplier)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
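    # --- Editor's note (hedged): illustrative set_data() calls matching the
    # formats documented in the class docstring; the attribute name `k`, array
    # shapes and file names are hypothetical.
    #   k.set_data(1.0e-4)                        # constant for every cell
    #   k.set_data(np.ones((nlay, nrow, ncol)))   # full ndarray
    #   k.set_data([1.0e-4,                       # layered: constant,
    #               layer2_array,                 #          ndarray,
    #               {'filename': 'k_layer3.txt',  #          external file
    #                'factor': 1.0, 'iprn': 1}])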
def __setattr__(self, name, value):
if name == '__setstate__':
raise AttributeError(name)
elif name == 'fname':
self._get_storage_obj().layer_storage.first_item().fname = value
elif name == 'factor':
self._get_storage_obj().layer_storage.first_item().factor = value
elif name == 'iprn':
self._get_storage_obj().layer_storage.first_item().iprn = value
elif name == 'binary':
self._get_storage_obj().layer_storage.first_item().binary = value
else:
super(MFArray, self).__setattr__(name, value)
def __getitem__(self, k):
if isinstance(k, int):
k = (k,)
storage = self._get_storage_obj()
if storage.layered and (isinstance(k, tuple) or isinstance(k, list)):
if not storage.layer_storage.in_shape(k):
                comment = 'Could not retrieve layer {} of "{}". There ' \
'are only {} layers available' \
'.'.format(k, self.structure.name,
len(storage.layer_storage))
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'getting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, comment,
self._simulation_data.debug)
# for layered data treat k as layer number(s)
return storage.layer_storage[k]
else:
# for non-layered data treat k as an array/list index of the data
if isinstance(k, int):
try:
if len(self.get_data(apply_mult=True).shape) == 1:
return self.get_data(apply_mult=True)[k]
elif self.get_data(apply_mult=True).shape[0] == 1:
return self.get_data(apply_mult=True)[0, k]
elif self.get_data(apply_mult=True).shape[1] == 1:
return self.get_data(apply_mult=True)[k, 0]
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
comment = 'Unable to resolve index "{}" for ' \
'multidimensional data.'.format(k)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'getting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, comment,
self._simulation_data.debug)
else:
try:
if isinstance(k, tuple):
if len(k) == 3:
return self.get_data(apply_mult=True)[k[0], k[1], k[2]]
elif len(k) == 2:
return self.get_data(apply_mult=True)[k[0], k[1]]
if len(k) == 1:
return self.get_data(apply_mult=True)[k]
else:
return self.get_data(apply_mult=True)[(k,)]
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
def __setitem__(self, k, value):
storage = self._get_storage_obj()
if storage.layered:
if isinstance(k, int):
k = (k,)
# for layered data treat k as a layer number
try:
storage.layer_storage[k].set_data(value)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
else:
try:
# for non-layered data treat k as an array/list index of the data
a = self.get_data()
a[k] = value
a = a.astype(self.get_data().dtype)
layer_storage = storage.layer_storage.first_item()
self._get_storage_obj().set_data(a, key=self._current_key,
multiplier=layer_storage.factor)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting data',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
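    # --- Editor's note (hedged) ---
    # Indexing semantics of the two methods above: for layered data an integer
    # subscript selects a layer (e.g. arr[0] returns the LayerStorage for layer 0,
    # and arr[0] = values replaces that layer's data), while for non-layered data
    # the same subscript addresses the underlying array (e.g. arr[0, 0, 1] = 5.0
    # modifies a single cell).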
def new_simulation(self, sim_data):
super(MFArray, self).new_simulation(sim_data)
self._data_storage = self._new_storage(False)
self._layer_shape = (1,)
def supports_layered(self):
try:
model_grid = self._data_dimensions.get_model_grid()
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'getting model grid',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
return self.structure.layered and \
model_grid.grid_type() != DiscretizationType.DISU
def set_layered_data(self, layered_data):
if layered_data is True and self.structure.layered is False:
if self._data_dimensions.get_model_grid().grid_type() == \
DiscretizationType.DISU:
comment = 'Layered option not available for unstructured ' \
'grid. {}'.format(self._path)
else:
comment = 'Data "{}" does not support layered option. ' \
'{}'.format(self._data_name, self._path)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'setting layered data', self.structure.name,
inspect.stack()[0][3], type_, value_,
traceback_, comment,
self._simulation_data.debug)
self._get_storage_obj().layered = layered_data
def make_layered(self):
if self.supports_layered():
try:
self._get_storage_obj().make_layered()
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'making data layered',
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
else:
if self._data_dimensions.get_model_grid().grid_type() == \
DiscretizationType.DISU:
comment = 'Layered option not available for unstructured ' \
'grid. {}'.format(self._path)
else:
comment = 'Data "{}" does not support layered option. ' \
'{}'.format(self._data_name, self._path)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'converting data to layered',
self.structure.name,
inspect.stack()[0][3], type_, value_,
traceback_, comment,
self._simulation_data.debug)
def store_as_external_file(self, external_file_path, multiplier=[1.0],
layer=None):
if isinstance(layer, int):
layer = (layer,)
storage = self._get_storage_obj()
if storage is None:
self._set_storage_obj(self._new_storage(False, True))
ds_index = self._resolve_layer_index(layer)
try:
# move data to file
if storage.layer_storage[ds_index[0]].data_storage_type == \
mfdata.DataStorageType.external_file:
storage.external_to_external(external_file_path, multiplier,
layer)
else:
storage.internal_to_external(external_file_path, multiplier,
layer)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(self.structure.get_model(),
self.structure.get_package(),
self._path,
'storing data in external file '
'{}'.format(external_file_path),
self.structure.name,
inspect.stack()[0][3], type_,
value_, traceback_, None,
self._simulation_data.debug, ex)
# update data storage
self._get_storage_obj().layer_storage[ds_index[0]].data_storage_type \
= mfdata.DataStorageType.external_file
self._get_storage_obj().layer_storage[ds_index[0]].fname = \
external_file_path
if multiplier is not None:
self._get_storage_obj().layer_storage[ds_index[0]].multiplier = \
multiplier[0]
def has_data(self, layer=None):
storage = self._get_storage_obj()
if storage is None:
return False
if isinstance(layer, int):
layer = (layer,)
try:
| |
self.params.train_dir is None:
print('note that train_dir is not specified')
# raise ValueError('Trained model directory not specified')
try:
global_step, checkpoint_path = load_checkpoint(saver, sess, self.my_params.load_ckpt)
self.last_weights_file = checkpoint_path
print('got global_step={} in checkpoint {}'.format(global_step, checkpoint_path))
except CheckpointNotFoundException:
log_fn('Checkpoint not found in %s' % self.my_params.load_ckpt)
return
if self.dataset.queue_runner_required():
tf.train.start_queue_runners(sess=sess)
image_producer = None
if input_producer_op is not None:
image_producer = cnn_util.ImageProducer(
sess, input_producer_op, self.batch_group_size,
self.params.use_python32_barrier)
image_producer.start()
if enqueue_ops:
for i in xrange(len(enqueue_ops)):
sess.run(enqueue_ops[:(i + 1)])
if image_producer is not None:
image_producer.notify_image_consumption()
loop_start_time = start_time = time.time()
# TODO(laigd): refactor the part to compute/report the accuracy. Currently
# it only works for image models.
top_1_accuracy_sum = 0.0
top_5_accuracy_sum = 0.0
loss_sum = 0.0
total_eval_count = self.num_batches * self.batch_size
print('total_eval_count=', total_eval_count)
# print('----------show var values before eval---------')
# for v in self.get_global_variables():
# if 'global' in v.name:
# continue
# print(v.name, np.mean(sess.run(v)))
for step in xrange(self.num_batches):
if (summary_writer is not None and summary_op is not None and self.params.save_summaries_steps > 0 and
(step + 1) % self.params.save_summaries_steps == 0):
results, summary_str = sess.run([fetches, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary_str)
else:
results = sess.run(fetches, feed_dict=feed_dict)
results = self.model.postprocess(results)
top_1_accuracy_sum += results['top_1_accuracy']
top_5_accuracy_sum += results['top_5_accuracy']
loss_sum += results['loss']
if (step + 1) % self.params.display_every == 0:
duration = time.time() - start_time
examples_per_sec = (
self.batch_size * self.params.display_every / duration)
log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec))
start_time = time.time()
if image_producer is not None:
image_producer.notify_image_consumption()
loop_end_time = time.time()
if image_producer is not None:
image_producer.done()
accuracy_at_1 = top_1_accuracy_sum / self.num_batches
print('top1={}/{}={}'.format(top_1_accuracy_sum, self.num_batches, accuracy_at_1))
accuracy_at_5 = top_5_accuracy_sum / self.num_batches
mean_loss = loss_sum / self.num_batches
summary = tf.Summary()
summary.value.add(tag='eval/Accuracy@1', simple_value=accuracy_at_1)
summary.value.add(tag='eval/Accuracy@5', simple_value=accuracy_at_5)
for result_key, result_value in results.items():
if result_key.startswith(constants.SIMPLE_VALUE_RESULT_PREFIX):
prefix_len = len(constants.SIMPLE_VALUE_RESULT_PREFIX)
summary.value.add(tag='eval/' + result_key[prefix_len:],
simple_value=result_value)
if summary_writer is not None:
summary_writer.add_summary(summary, global_step)
log_fn('Accuracy @ 1 = %.4f Accuracy @ 5 = %.4f Loss = %.8f [%d examples]' %
(accuracy_at_1, accuracy_at_5, mean_loss, total_eval_count))
elapsed_time = loop_end_time - loop_start_time
images_per_sec = (self.num_batches * self.batch_size / elapsed_time)
# Note that we compute the top 1 accuracy and top 5 accuracy for each
# batch, which will have a slight performance impact.
if self.my_params.save_hdf5:
self.save_weights_to_hdf5(self.my_params.save_hdf5)
log_fn('-' * 64)
log_fn('total images/sec: %.2f' % images_per_sec)
log_fn('-' * 64)
if self.benchmark_logger:
eval_result = {
          'eval_top_1_accuracy': accuracy_at_1,
          'eval_top_5_accuracy': accuracy_at_5,
          'eval_average_examples_per_sec': images_per_sec,
          tf.GraphKeys.GLOBAL_STEP: global_step,
}
self.benchmark_logger.log_evaluation_result(eval_result)
lf = self.my_params.eval_log_file or OVERALL_EVAL_RECORD_FILE
log_important('{},{},top1={:.5f},top5={:.5f},loss={:.8f} on {} at {}'.format(self.params.model, self.last_weights_file or self.my_params.load_ckpt or self.my_params.init_hdf5,
accuracy_at_1, accuracy_at_5, mean_loss, self.subset, cur_time()), log_file=lf)
GPU_CACHED_INPUT_VARIABLE_NAME = 'gpu_cached_inputs'
# shawn
  # Override this if you wish to use another convnet_builder (a BDS convnet builder, for example)
def get_convnet_builder(self, input_list, phase_train):
images = input_list[0]
assert self.params.data_format in ['NCHW', 'NHWC']
if self.params.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
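      # NHWC -> NCHW: [batch, height, width, channels] is reordered to
      # [batch, channels, height, width].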
var_type = tf.float32
data_type = tf.float16 if self.params.use_fp16 else tf.float32
if data_type == tf.float16 and self.params.fp16_vars:
var_type = tf.float16
    # shawn:
    # input_nchan=3 -- are there any exceptions?
convnet_builder = ConvNetBuilder(images, input_nchan=3, phase_train=phase_train, use_tf_layers=self.params.use_tf_layers,
data_format=self.params.data_format, dtype=data_type, variable_dtype=var_type, use_dense_layer=self.my_params.use_dense_layer, input_rotation=self.my_params.input_rotation)
return convnet_builder
def postprocess_after_build_by_convnet_builder(self, convnet_builder, build_results):
print('nothing to do after build by convnet builder')
def do_train(self, graph_info):
"""Benchmark the graph.
Args:
graph_info: the namedtuple returned by _build_graph() which
contains all necessary information to benchmark the graph, including
named tensors/ops list, fetches, etc.
Returns:
Dictionary containing training statistics (num_workers, num_steps,
average_wall_time, images_per_sec).
"""
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
# First worker will be 'chief' - it will write summaries and
# save checkpoints.
is_chief = hvd.rank() == 0
else:
is_chief = (not self.job_name or self.task_index == 0)
summary_op = tf.summary.merge_all()
# summary_op = tf.group(summary_op, graph_info.summary_op_group)
# summary_op = tf.group(*graph_info.summary_ops)
summary_writer = None
if (is_chief and self.params.summary_verbosity and self.params.train_dir and
self.params.save_summaries_steps > 0):
summary_writer = tf.summary.FileWriter(self.params.train_dir,
tf.get_default_graph())
    # We want to start the benchmark timer right after an image_producer barrier
    # to avoid undesired waiting times on barriers.
if ((self.num_warmup_batches + len(graph_info.enqueue_ops) - 1) %
self.batch_group_size) != 0:
self.num_warmup_batches = int(
math.ceil(
(self.num_warmup_batches + len(graph_info.enqueue_ops) - 1.0) /
(self.batch_group_size)) * self.batch_group_size -
len(graph_info.enqueue_ops) + 1)
log_fn('Round up warm up steps to %d to match batch_group_size' %
self.num_warmup_batches)
assert ((self.num_warmup_batches + len(graph_info.enqueue_ops) - 1) %
self.batch_group_size) == 0
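    # Worked example (editor's note, values are hypothetical): with
    # num_warmup_batches=5, 3 enqueue ops and batch_group_size=4, the round-up is
    # ceil((5 + 3 - 1) / 4) * 4 - 3 + 1 = 6, and indeed (6 + 3 - 1) % 4 == 0.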
# We run the summaries in the same thread as the training operations by
# passing in None for summary_op to avoid a summary_thread being started.
# Running summaries and training operations in parallel could run out of
# GPU memory.
if is_chief and not self.forward_only_and_freeze:
saver = tf.train.Saver(
self.variable_mgr.savable_variables(),
save_relative_paths=True,
max_to_keep=self.params.max_ckpts_to_keep)
else:
saver = None
ready_for_local_init_op = None
if self.job_name and not (self.single_session or
self.distributed_collective):
# In distributed mode, we don't want to run local_var_init_op_group until
# the global variables are initialized, because local_var_init_op_group
# may use global variables (such as in distributed replicated mode). We
# don't set this in non-distributed mode, because in non-distributed mode,
# local_var_init_op_group may itself initialize global variables (such as
# in replicated mode).
ready_for_local_init_op = tf.report_uninitialized_variables(
tf.global_variables())
if self.params.variable_update == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
bcast_global_variables_op = hvd.broadcast_global_variables(0)
else:
bcast_global_variables_op = None
if self.params.variable_update == 'collective_all_reduce':
# It doesn't matter what this collective_graph_key value is,
# so long as it's > 0 and the same at every worker.
init_run_options = tf.RunOptions()
init_run_options.experimental.collective_graph_key = 6
else:
init_run_options = tf.RunOptions()
sv = MySupervisor(
# For the purpose of Supervisor, all Horovod workers are 'chiefs',
# since we want session to be initialized symmetrically on all the
# workers.
is_chief=is_chief or (self.params.variable_update == 'horovod'
or self.distributed_collective),
# Log dir should be unset on non-chief workers to prevent Horovod
# workers from corrupting each other's checkpoints.
logdir=self.params.train_dir if is_chief else None,
ready_for_local_init_op=ready_for_local_init_op,
local_init_op=graph_info.local_var_init_op_group,
saver=saver,
global_step=graph_info.global_step,
summary_op=None,
save_model_secs=self.params.save_model_secs,
summary_writer=summary_writer,
local_init_run_options=init_run_options,
load_ckpt_full_path=self.my_params.load_ckpt,
auto_continue=self.my_params.auto_continue)
step_train_times = []
start_standard_services = (
self.params.train_dir or
self.dataset.queue_runner_required())
target = self.cluster_manager.get_target() if self.cluster_manager else ''
#shawn
sess_context = sv.managed_session(
master=target,
config=create_config_proto(self.params),
start_standard_services=start_standard_services)
with sess_context as sess:
self.sess = sess
if self.params.backbone_model_path is not None:
self.model.load_backbone_model(sess, self.params.backbone_model_path)
if bcast_global_variables_op:
sess.run(bcast_global_variables_op)
image_producer = None
if graph_info.input_producer_op is not None:
image_producer = cnn_util.ImageProducer(
sess, graph_info.input_producer_op, self.batch_group_size,
self.params.use_python32_barrier)
image_producer.start()
if graph_info.enqueue_ops:
for i in xrange(len(graph_info.enqueue_ops)):
sess.run(graph_info.enqueue_ops[:(i + 1)])
if image_producer is not None:
image_producer.notify_image_consumption()
self.init_global_step, = sess.run([graph_info.global_step])
print('the current global step is ', self.init_global_step)
if self.job_name and not self.params.cross_replica_sync:
# TODO(zhengxq): Do we need to use a global step watcher at all?
global_step_watcher = GlobalStepWatcher(
sess, graph_info.global_step,
self.num_workers * self.num_warmup_batches +
self.init_global_step,
self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)
global_step_watcher.start()
else:
global_step_watcher = None
if self.graph_file is not None:
path, filename = os.path.split(self.graph_file)
as_text = filename.endswith('txt')
log_fn('Writing GraphDef as %s to %s' % ( # pyformat break
'text' if as_text else 'binary', self.graph_file))
tf.train.write_graph(sess.graph.as_graph_def(add_shapes=True), path,
filename, as_text)
log_fn('Running warm up')
local_step = -1 * self.num_warmup_batches
if self.single_session:
# In single session mode, each step, the global_step is incremented by
# 1. In non-single session mode, each step, the global_step is
# incremented once per worker. This means we need to divide
# init_global_step by num_workers only in non-single session mode.
end_local_step = self.num_batches - self.init_global_step
else:
end_local_step = self.num_batches - (self.init_global_step /
self.num_workers)
if not global_step_watcher:
# In cross-replica sync mode, all workers must run the same number of
# local steps, or else the workers running the extra step will block.
done_fn = lambda: local_step >= end_local_step
else:
done_fn = global_step_watcher.done
if self.params.debugger is not None:
if self.params.debugger == 'cli':
log_fn('The CLI TensorFlow debugger will be used.')
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
self.sess = sess
else:
log_fn('The TensorBoard debugger plugin will be used.')
sess = tf_debug.TensorBoardDebugWrapperSession(sess, self.params.debugger)
self.sess = sess
profiler = tf.profiler.Profiler() if self.params.tfprof_file else None
loop_start_time = time.time()
last_average_loss = None
##########
# shawn
# if self.my_params.init_hdf5:
# self.load_weights_from_hdf5(self.my_params.init_hdf5)
# print('----------show var values before train---------')
# for v in self.get_global_variables():
# if 'global' in v.name:
# continue
# print(v.name, np.mean(sess.run(v)))
print('self.lr_boundaries=', self.lr_boundaries)
while not done_fn():
if local_step == 0:
log_fn('Done warm up')
if graph_info.execution_barrier:
log_fn('Waiting for other replicas to finish warm up')
sess.run([graph_info.execution_barrier])
# TODO(laigd): rename 'Img' to maybe 'Input'.
header_str = ('Step\tImg/sec\t' +
self.params.loss_type_to_report.replace('/', ' '))
if self.params.print_training_accuracy or
quarters, the search
result will list 15 different files. If we want to download a
`~lightkurve.collections.LightCurveFileCollection` object containing all
15 observations, use::
>>> search_result.download_all() # doctest: +SKIP
or we can specify the downloaded products by limiting our search::
>>> lcf = search_lightcurvefile('Kepler-10', quarter=2).download() # doctest: +SKIP
The above line of code will only search and download Quarter 2 data and
create a `LightCurveFile` object called lcf.
We can also pass a radius into `search_lightcurvefile` to perform a cone search::
>>> search_lightcurvefile('Kepler-10', radius=100, quarter=4) # doctest: +SKIP
This will display a table containing all targets within 100 arcseconds of
Kepler-10 and in Quarter 4. We can then download a
`~lightkurve.collections.LightCurveFileCollection` containing all these
products using::
>>> search_lightcurvefile('kepler-10', radius=100, quarter=4).download_all() # doctest: +SKIP
"""
try:
return _search_products(target, radius=radius, filetype="Lightcurve",
cadence=cadence, mission=mission,
provenance_name=author,
quarter=quarter, month=month,
campaign=campaign, sector=sector, limit=limit)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
def search_tesscut(target, sector=None):
"""Searches MAST for TESS Full Frame Image cutouts containing a desired target or region.
This feature uses the `TESScut service <https://mast.stsci.edu/tesscut/>`_
provided by the TESS data archive at MAST. If you use this service in
your work, please `cite TESScut <https://ascl.net/code/v/2239>`_ in your
publications.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
sector : int or list
TESS Sector number. Default (None) will return all available sectors. A
list of desired sectors can also be provided.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
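Examples
--------
A hypothetical cutout search for Pi Mensae in Sector 1 (target and sector
chosen only for illustration)::
>>> search_tesscut("Pi Mensae", sector=1) # doctest: +SKIP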
"""
try:
return _search_products(target, filetype="ffi", mission='TESS', sector=sector)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
def _search_products(target, radius=None, filetype="Lightcurve", cadence=None,
mission=('Kepler', 'K2', 'TESS'),
provenance_name=('Kepler', 'K2', 'SPOC'),
t_exptime=(0, 9999), quarter=None, month=None,
campaign=None, sector=None, limit=None,
**extra_query_criteria):
"""Helper function which returns a SearchResult object containing MAST
products that match several criteria.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
filetype : {'Target pixel', 'Lightcurve', 'FFI'}
Type of files queried at MAST.
cadence : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g. ``cadence=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
mission : str, list of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
provenance_name : str, list of str
Provenance of the data product. Defaults to official products, i.e.
('Kepler', 'K2', 'SPOC'). Community-provided products such as 'K2SFF'
are supported as well.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if cadence='short' you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return
Returns
-------
SearchResult : :class:`SearchResult` object.
"""
if isinstance(target, int):
if (0 < target) and (target < 13161030):
log.warning("Warning: {} may refer to a different Kepler or TESS target. "
"Please add the prefix 'KIC' or 'TIC' to disambiguate."
"".format(target))
elif (200000000 < target) and (target < 251813739):
log.warning("Warning: {} may refer to a different K2 or TESS target. "
"Please add the prefix 'EPIC' or 'TIC' to disambiguate."
"".format(target))
# Ensure mission is a list
mission = np.atleast_1d(mission).tolist()
# Avoid filtering on `provenance_name` if `author` equals "any" or "all"
if provenance_name in ("any", "all") or provenance_name is None:
provenance_name = None
else:
provenance_name = np.atleast_1d(provenance_name).tolist()
# Speed up by restricting the MAST query if we don't want FFI image data
extra_query_criteria = {}
if filetype in ['Lightcurve', 'Target Pixel']:
# At MAST, non-FFI Kepler pipeline products are known as "cube" products,
# and non-FFI TESS pipeline products are listed as "timeseries".
extra_query_criteria['dataproduct_type'] = ['cube', 'timeseries']
# Make sure `search_tesscut` always performs a cone search (i.e. is always
# passed a radius value), because a strict target name search does not apply.
if filetype.lower() == 'ffi' and radius is None:
radius = .0001 * u.arcsec
observations = _query_mast(target, radius=radius,
project=mission,
provenance_name=provenance_name,
t_exptime=t_exptime,
sequence_number=campaign or sector,
**extra_query_criteria)
log.debug("MAST found {} observations. "
"Now querying MAST for the corresponding data products."
"".format(len(observations)))
if len(observations) == 0:
raise SearchError('No data found for target "{}".'.format(target))
# Light curves and target pixel files
if filetype.lower() != 'ffi':
from astroquery.mast import Observations
products = Observations.get_product_list(observations)
result = join(observations, products, keys="obs_id", join_type='right',
uniq_col_name='{col_name}{table_name}', table_names=['', '_products'])
result.sort(['distance', 'obs_id'])
# Add the user-friendly 'author' column (synonym for 'provenance_name')
result['author'] = result['provenance_name']
# Add the user-friendly 'observation' column
result['observation'] = None
obs_prefix = {'Kepler': 'Quarter', 'K2': 'Campaign', 'TESS': 'Sector'}
for idx in range(len(result)):
obs_project = result['project'][idx]
obs_seqno = result['sequence_number'][idx]
# Kepler sequence_number values were not populated at the time of
# writing this code, so we parse them from the description field.
if obs_project == 'Kepler' and result['sequence_number'].mask[idx]:
try:
obs_seqno = re.findall(r".*Q(\d+)", result['description'][idx])[0]
except IndexError:
obs_seqno = ""
result['observation'][idx] = "{} {} {}".format(obs_project,
obs_prefix.get(obs_project, ""),
obs_seqno)
masked_result = _filter_products(result, filetype=filetype,
campaign=campaign, quarter=quarter,
cadence=cadence, project=mission,
provenance_name=provenance_name,
month=month, sector=sector, limit=limit)
log.debug("MAST found {} matching data products.".format(len(masked_result)))
masked_result['distance'].info.format = '.1f' # display <0.1 arcsec
return SearchResult(masked_result)
# Full Frame Images
else:
cutouts = []
for idx in np.where(['TESS FFI' in t for t in observations['target_name']])[0]:
# if target passed in is a SkyCoord object, convert to RA, dec pair
if isinstance(target, SkyCoord):
target = '{}, {}'.format(target.ra.deg, target.dec.deg)
# pull sector numbers
s = observations['sequence_number'][idx]
# if the desired sector is available, add a row
if s in np.atleast_1d(sector) or sector is None:
cutouts.append({'description': f'TESS FFI Cutout (sector {s})',
'observation': f'TESS Sector {s}',
'target_name': str(target),
'targetid': str(target),
't_exptime': observations['t_exptime'][idx],
'productFilename': 'TESSCut',
'provenance_name': 'MAST',
'author': 'MAST',
'distance': 0.0,
'sequence_number': s,
'project': 'TESS',
'obs_collection': 'TESS'}
)
if len(cutouts) > 0:
log.debug("Found {} matching cutouts.".format(len(cutouts)))
masked_result = Table(cutouts)
masked_result.sort(['distance', 'sequence_number'])
else:
masked_result = None
return SearchResult(masked_result)
def _query_mast(target, radius=None,
project=('Kepler', 'K2', 'TESS'),
provenance_name=("Kepler", "K2", "SPOC"),
t_exptime=(0, 9999),
sequence_number=None,
**extra_query_criteria):
"""Helper function which wraps `astroquery.mast.Observations.query_criteria()`
to return a table of all Kepler/K2/TESS observations of a given target.
By default only the official data products are returned, but this can be
adjusted by adding alternative data product names into `provenance_name`.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
project : str, list of str
Mission name. Typically 'Kepler', 'K2', or 'TESS'.
This parameter is case-insensitive.
provenance_name : str, list of str
Provenance of the observation. Common options include 'Kepler', 'K2',
'SPOC', 'K2SFF', 'EVEREST', 'KEPSEISMIC'.
This parameter is case-insensitive.
t_exptime : (float, float) tuple
Exposure time range in seconds. Common values include `(59, 61)`
for Kepler short cadence and `(1799, 1801)` for Kepler long cadence.
sequence_number : int, list of int
Quarter, Campaign, or Sector number.
**extra_query_criteria : kwargs
Extra criteria to be passed to `astroquery.mast.Observations.query_criteria`.
Returns
-------
obs : astropy.Table
Table detailing the available observations on MAST.
"""
# Local astroquery import because the package is not used elsewhere
from astroquery.mast import Observations
from astroquery.exceptions import ResolverError, NoResultsWarning
# If passed a SkyCoord, convert it to an "ra, dec" string for MAST
if isinstance(target, SkyCoord):
target =
LETTER AU
0A15 GURMUKHI LETTER KA
0A16 GURMUKHI LETTER KHA
0A17 GURMUKHI LETTER GA
0A18 GURMUKHI LETTER GHA
0A19 GURMUKHI LETTER NGA
0A1A GURMUKHI LETTER CA
0A1B GURMUKHI LETTER CHA
0A1C GURMUKHI LETTER JA
0A1D GURMUKHI LETTER JHA
0A1E GURMUKHI LETTER NYA
0A1F GURMUKHI LETTER TTA
0A20 GURMUKHI LETTER TTHA
0A21 GURMUKHI LETTER DDA
0A22 GURMUKHI LETTER DDHA
0A23 GURMUKHI LETTER NNA
0A24 GURMUKHI LETTER TA
0A25 GURMUKHI LETTER THA
0A26 GURMUKHI LETTER DA
0A27 GURMUKHI LETTER DHA
0A28 GURMUKHI LETTER NA
0A2A GURMUKHI LETTER PA
0A2B GURMUKHI LETTER PHA
0A2C GURMUKHI LETTER BA
0A2D GURMUKHI LETTER BHA
0A2E GURMUKHI LETTER MA
0A2F GURMUKHI LETTER YA
0A30 GURMUKHI LETTER RA
0A32 GURMUKHI LETTER LA
0A33 GURMUKHI LETTER LLA
0A35 GURMUKHI LETTER VA
0A36 GURMUKHI LETTER SHA
0A38 GURMUKHI LETTER SA
0A39 GURMUKHI LETTER HA
0A3C GURMUKHI SIGN NUKTA
0A3E GURMUKHI VOWEL SIGN AA
0A3F GURMUKHI VOWEL SIGN I
0A40 GURMUKHI VOWEL SIGN II
0A41 GURMUKHI VOWEL SIGN U
0A42 GURMUKHI VOWEL SIGN UU
0A47 GURMUKHI VOWEL SIGN EE
0A48 GURMUKHI VOWEL SIGN AI
0A4B GURMUKHI VOWEL SIGN OO
0A4C GURMUKHI VOWEL SIGN AU
0A4D GURMUKHI SIGN VIRAMA
0A51 GURMUKHI SIGN UDAAT
0A59 GURMUKHI LETTER KHHA
0A5A GURMUKHI LETTER GHHA
0A5B GURMUKHI LETTER ZA
0A5C GURMUKHI LETTER RRA
0A5E GURMUKHI LETTER FA
0A66 GURMUKHI DIGIT ZERO
0A67 GURMUKHI DIGIT ONE
0A68 GURMUKHI DIGIT TWO
0A69 GURMUKHI DIGIT THREE
0A6A GURMUKHI DIGIT FOUR
0A6B GURMUKHI DIGIT FIVE
0A6C GURMUKHI DIGIT SIX
0A6D GURMUKHI DIGIT SEVEN
0A6E GURMUKHI DIGIT EIGHT
0A6F GURMUKHI DIGIT NINE
0A70 GURMUKHI TIPPI
0A71 GURMUKHI ADDAK
0A72 GURMUKHI IRI
0A73 GURMUKHI URA
0A74 GURMUKHI EK ONKAR
0A75 GURMUKHI SIGN YAKASH
0A81 GUJARATI SIGN CANDRABINDU
0A82 GUJARATI SIGN ANUSVARA
0A83 GUJARATI SIGN VISARGA
0A85 GUJARATI LETTER A
0A86 GUJARATI LETTER AA
0A87 GUJARATI LETTER I
0A88 GUJARATI LETTER II
0A89 GUJARATI LETTER U
0A8A GUJARATI LETTER UU
0A8B GUJARATI LETTER VOCALIC R
0A8C GUJARATI LETTER VOCALIC L
0A8D GUJARATI VOWEL CANDRA E
0A8F GUJARATI LETTER E
0A90 GUJARATI LETTER AI
0A91 GUJARATI VOWEL CANDRA O
0A93 GUJARATI LETTER O
0A94 GUJARATI LETTER AU
0A95 GUJARATI LETTER KA
0A96 GUJARATI LETTER KHA
0A97 GUJARATI LETTER GA
0A98 GUJARATI LETTER GHA
0A99 GUJARATI LETTER NGA
0A9A GUJARATI LETTER CA
0A9B GUJARATI LETTER CHA
0A9C GUJARATI LETTER JA
0A9D GUJARATI LETTER JHA
0A9E GUJARATI LETTER NYA
0A9F GUJARATI LETTER TTA
0AA0 GUJARATI LETTER TTHA
0AA1 GUJARATI LETTER DDA
0AA2 GUJARATI LETTER DDHA
0AA3 GUJARATI LETTER NNA
0AA4 GUJARATI LETTER TA
0AA5 GUJARATI LETTER THA
0AA6 GUJARATI LETTER DA
0AA7 GUJARATI LETTER DHA
0AA8 GUJARATI LETTER NA
0AAA GUJARATI LETTER PA
0AAB GUJARATI LETTER PHA
0AAC GUJARATI LETTER BA
0AAD GUJARATI LETTER BHA
0AAE GUJARATI LETTER MA
0AAF GUJARATI LETTER YA
0AB0 GUJARATI LETTER RA
0AB2 GUJARATI LETTER LA
0AB3 GUJARATI LETTER LLA
0AB5 GUJARATI LETTER VA
0AB6 GUJARATI LETTER SHA
0AB7 GUJARATI LETTER SSA
0AB8 GUJARATI LETTER SA
0AB9 GUJARATI LETTER HA
0ABC GUJARATI SIGN NUKTA
0ABD GUJARATI SIGN AVAGRAHA
0ABE GUJARATI VOWEL SIGN AA
0ABF GUJARATI VOWEL SIGN I
0AC0 GUJARATI VOWEL SIGN II
0AC1 GUJARATI VOWEL SIGN U
0AC2 GUJARATI VOWEL SIGN UU
0AC3 GUJARATI VOWEL SIGN VOCALIC R
0AC4 GUJARATI VOWEL SIGN VOCALIC RR
0AC5 GUJARATI VOWEL SIGN CANDRA E
0AC7 GUJARATI VOWEL SIGN E
0AC8 GUJARATI VOWEL SIGN AI
0AC9 GUJARATI VOWEL SIGN CANDRA O
0ACB GUJARATI VOWEL SIGN O
0ACC GUJARATI VOWEL SIGN AU
0ACD GUJARATI SIGN VIRAMA
0AD0 GUJARATI OM
0AE0 GUJARATI LETTER VOCALIC RR
0AE1 GUJARATI LETTER VOCALIC LL
0AE2 GUJARATI VOWEL SIGN VOCALIC L
0AE3 GUJARATI VOWEL SIGN VOCALIC LL
0AE6 GUJARATI DIGIT ZERO
0AE7 GUJARATI DIGIT ONE
0AE8 GUJARATI DIGIT TWO
0AE9 GUJARATI DIGIT THREE
0AEA GUJARATI DIGIT FOUR
0AEB GUJARATI DIGIT FIVE
0AEC GUJARATI DIGIT SIX
0AED GUJARATI DIGIT SEVEN
0AEE GUJARATI DIGIT EIGHT
0AEF GUJARATI DIGIT NINE
0AF1 GUJARATI RUPEE SIGN
0B01 ORIYA SIGN CANDRABINDU
0B02 ORIYA SIGN ANUSVARA
0B03 ORIYA SIGN VISARGA
0B05 ORIYA LETTER A
0B06 ORIYA LETTER AA
0B07 ORIYA LETTER I
0B08 ORIYA LETTER II
0B09 ORIYA LETTER U
0B0A ORIYA LETTER UU
0B0B ORIYA LETTER VOCALIC R
0B0C ORIYA LETTER VOCALIC L
0B0F ORIYA LETTER E
0B10 ORIYA LETTER AI
0B13 ORIYA LETTER O
0B14 ORIYA LETTER AU
0B15 ORIYA LETTER KA
0B16 ORIYA LETTER KHA
0B17 ORIYA LETTER GA
0B18 ORIYA LETTER GHA
0B19 ORIYA LETTER NGA
0B1A ORIYA LETTER CA
0B1B ORIYA LETTER CHA
0B1C ORIYA LETTER JA
0B1D ORIYA LETTER JHA
0B1E ORIYA LETTER NYA
0B1F ORIYA LETTER TTA
0B20 ORIYA LETTER TTHA
0B21 ORIYA LETTER DDA
0B22 ORIYA LETTER DDHA
0B23 ORIYA LETTER NNA
0B24 ORIYA LETTER TA
0B25 ORIYA LETTER THA
0B26 ORIYA LETTER DA
0B27 ORIYA LETTER DHA
0B28 ORIYA LETTER NA
0B2A ORIYA LETTER PA
0B2B ORIYA LETTER PHA
0B2C ORIYA LETTER BA
0B2D ORIYA LETTER BHA
0B2E ORIYA LETTER MA
0B2F ORIYA LETTER YA
0B30 ORIYA LETTER RA
0B32 ORIYA LETTER LA
0B33 ORIYA LETTER LLA
0B35 ORIYA LETTER VA
0B36 ORIYA LETTER SHA
0B37 ORIYA LETTER SSA
0B38 ORIYA LETTER SA
0B39 ORIYA LETTER HA
0B3C ORIYA SIGN NUKTA
0B3D ORIYA SIGN AVAGRAHA
0B3E ORIYA VOWEL SIGN AA
0B3F ORIYA VOWEL SIGN I
0B40 ORIYA VOWEL SIGN II
0B41 ORIYA VOWEL SIGN U
0B42 ORIYA VOWEL SIGN UU
0B43 ORIYA VOWEL SIGN VOCALIC R
0B44 ORIYA VOWEL SIGN VOCALIC RR
0B47 ORIYA VOWEL SIGN E
0B48 ORIYA VOWEL SIGN AI
0B4B ORIYA VOWEL SIGN O
0B4C ORIYA VOWEL SIGN AU
0B4D ORIYA SIGN VIRAMA
0B56 ORIYA AI LENGTH MARK
0B57 ORIYA AU LENGTH MARK
0B5C ORIYA LETTER RRA
0B5D ORIYA LETTER RHA
0B5F ORIYA LETTER YYA
0B60 ORIYA LETTER VOCALIC RR
0B61 ORIYA LETTER VOCALIC LL
0B62 ORIYA VOWEL SIGN VOCALIC L
0B63 ORIYA VOWEL SIGN VOCALIC LL
0B66 ORIYA DIGIT ZERO
0B67 ORIYA DIGIT ONE
0B68 ORIYA DIGIT TWO
0B69 ORIYA DIGIT THREE
0B6A ORIYA DIGIT FOUR
0B6B ORIYA DIGIT FIVE
0B6C ORIYA DIGIT SIX
0B6D ORIYA DIGIT SEVEN
0B6E ORIYA DIGIT EIGHT
0B6F ORIYA DIGIT NINE
0B70 ORIYA ISSHAR
0B71 ORIYA LETTER WA
0B82 TAMIL SIGN ANUSVARA
0B83 TAMIL SIGN VISARGA
0B85 TAMIL LETTER A
0B86 TAMIL LETTER AA
0B87 TAMIL LETTER I
0B88 TAMIL LETTER II
0B89 TAMIL LETTER U
0B8A TAMIL LETTER UU
0B8E TAMIL LETTER E
0B8F TAMIL LETTER EE
0B90 TAMIL LETTER AI
0B92 TAMIL LETTER O
0B93 TAMIL LETTER OO
0B94 TAMIL LETTER AU
0B95 TAMIL LETTER KA
0B99 TAMIL LETTER NGA
0B9A TAMIL LETTER CA
0B9C TAMIL LETTER JA
0B9E TAMIL LETTER NYA
0B9F TAMIL LETTER TTA
0BA3 TAMIL LETTER NNA
0BA4 TAMIL LETTER TA
0BA8 TAMIL LETTER NA
0BA9 TAMIL LETTER NNNA
0BAA TAMIL LETTER PA
0BAE TAMIL LETTER MA
0BAF TAMIL LETTER YA
0BB0 TAMIL LETTER RA
0BB1 TAMIL LETTER RRA
0BB2 TAMIL LETTER LA
0BB3 TAMIL LETTER LLA
0BB4 TAMIL LETTER LLLA
0BB5 TAMIL LETTER VA
0BB6 TAMIL LETTER SHA
0BB7 TAMIL LETTER SSA
0BB8 TAMIL LETTER SA
0BB9 TAMIL LETTER HA
0BBE TAMIL VOWEL SIGN AA
0BBF TAMIL VOWEL SIGN I
0BC0 TAMIL VOWEL SIGN II
0BC1 TAMIL VOWEL SIGN U
0BC2 TAMIL VOWEL SIGN UU
0BC6 TAMIL VOWEL SIGN E
0BC7 TAMIL VOWEL SIGN EE
0BC8 TAMIL VOWEL SIGN AI
0BCA TAMIL VOWEL SIGN O
0BCB TAMIL VOWEL SIGN OO
0BCC TAMIL VOWEL SIGN AU
0BCD TAMIL SIGN VIRAMA
0BD0 TAMIL OM
0BD7 TAMIL AU LENGTH MARK
0BE6 TAMIL DIGIT ZERO
0BE7 TAMIL DIGIT ONE
0BE8 TAMIL DIGIT TWO
0BE9 TAMIL DIGIT THREE
0BEA TAMIL DIGIT FOUR
0BEB TAMIL DIGIT FIVE
0BEC TAMIL DIGIT SIX
0BED TAMIL DIGIT SEVEN
0BEE TAMIL DIGIT EIGHT
0BEF TAMIL DIGIT NINE
0BF0 TAMIL NUMBER TEN
0BF1 TAMIL NUMBER ONE HUNDRED
0BF2 TAMIL NUMBER ONE THOUSAND
0BF3 TAMIL DAY SIGN
0BF4 TAMIL MONTH SIGN
0BF5 TAMIL YEAR SIGN
0BF6 TAMIL DEBIT SIGN
0BF7 TAMIL CREDIT SIGN
0BF8 TAMIL AS ABOVE SIGN
0BF9 TAMIL RUPEE SIGN
0BFA TAMIL NUMBER SIGN
0C01 TELUGU SIGN CANDRABINDU
0C02 TELUGU SIGN ANUSVARA
0C03 TELUGU SIGN VISARGA
0C05 TELUGU LETTER A
0C06 TELUGU LETTER AA
0C07 TELUGU LETTER I
0C08 TELUGU LETTER II
0C09 TELUGU LETTER U
0C0A TELUGU LETTER UU
0C0B TELUGU LETTER VOCALIC R
0C0C TELUGU LETTER VOCALIC L
0C0E TELUGU LETTER E
0C0F TELUGU LETTER EE
0C10 TELUGU LETTER AI
0C12 TELUGU LETTER O
0C13 TELUGU LETTER OO
0C14 TELUGU LETTER AU
0C15 TELUGU LETTER KA
0C16 TELUGU LETTER KHA
0C17 TELUGU LETTER GA
0C18 TELUGU LETTER GHA
0C19 TELUGU LETTER NGA
0C1A TELUGU LETTER CA
0C1B TELUGU LETTER CHA
0C1C TELUGU LETTER JA
0C1D TELUGU LETTER JHA
0C1E TELUGU LETTER NYA
0C1F TELUGU LETTER TTA
0C20 TELUGU LETTER TTHA
0C21 TELUGU LETTER DDA
0C22 TELUGU LETTER DDHA
0C23 TELUGU LETTER NNA
0C24 TELUGU LETTER TA
0C25 TELUGU LETTER THA
0C26 TELUGU LETTER DA
0C27 TELUGU LETTER DHA
0C28 TELUGU LETTER NA
0C2A TELUGU LETTER PA
0C2B TELUGU LETTER PHA
0C2C TELUGU LETTER BA
0C2D TELUGU LETTER BHA
0C2E TELUGU LETTER MA
0C2F TELUGU LETTER YA
0C30 TELUGU LETTER RA
0C31 TELUGU LETTER RRA
0C32 TELUGU LETTER LA
0C33 TELUGU LETTER LLA
0C35 TELUGU LETTER VA
0C36 TELUGU LETTER SHA
0C37 TELUGU LETTER SSA
0C38 TELUGU LETTER SA
0C39 TELUGU LETTER HA
0C3D TELUGU SIGN AVAGRAHA
0C3E TELUGU VOWEL SIGN AA
0C3F TELUGU VOWEL SIGN I
0C40 TELUGU VOWEL SIGN II
0C41 TELUGU VOWEL SIGN U
0C42 TELUGU VOWEL SIGN UU
0C43 TELUGU VOWEL SIGN VOCALIC R
0C44 TELUGU VOWEL SIGN VOCALIC RR
0C46 TELUGU VOWEL SIGN E
0C47 TELUGU VOWEL SIGN EE
0C48 TELUGU VOWEL SIGN AI
0C4A TELUGU VOWEL SIGN O
0C4B TELUGU VOWEL SIGN OO
0C4C TELUGU VOWEL SIGN AU
0C4D TELUGU SIGN VIRAMA
0C55 TELUGU LENGTH MARK
0C56 TELUGU AI LENGTH MARK
0C58 TELUGU LETTER TSA
0C59 TELUGU LETTER DZA
0C60 TELUGU LETTER VOCALIC RR
0C61 TELUGU LETTER VOCALIC LL
0C62 TELUGU VOWEL SIGN VOCALIC L
0C63 TELUGU VOWEL SIGN VOCALIC LL
0C66 TELUGU DIGIT ZERO
0C67 TELUGU DIGIT ONE
0C68 TELUGU DIGIT TWO
0C69 TELUGU DIGIT THREE
0C6A TELUGU DIGIT FOUR
0C6B TELUGU DIGIT FIVE
0C6C TELUGU DIGIT SIX
0C6D TELUGU DIGIT SEVEN
0C6E TELUGU DIGIT EIGHT
0C6F TELUGU DIGIT NINE
0C78 TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF FOUR
0C79 TELUGU FRACTION DIGIT ONE FOR ODD POWERS OF FOUR
0C7A TELUGU FRACTION DIGIT TWO FOR ODD POWERS OF FOUR
0C7B TELUGU FRACTION DIGIT THREE FOR ODD POWERS OF FOUR
0C7C TELUGU FRACTION DIGIT ONE FOR EVEN POWERS OF FOUR
0C7D TELUGU FRACTION DIGIT TWO FOR EVEN POWERS OF FOUR
0C7E TELUGU FRACTION DIGIT THREE FOR EVEN POWERS OF FOUR
0C7F TELUGU SIGN TUUMU
0C82 KANNADA SIGN ANUSVARA
0C83 KANNADA SIGN VISARGA
0C85 KANNADA LETTER A
0C86 KANNADA LETTER AA
0C87 KANNADA LETTER I
0C88 KANNADA LETTER II
0C89 KANNADA LETTER U
0C8A KANNADA LETTER UU
0C8B KANNADA LETTER VOCALIC R
0C8C KANNADA LETTER VOCALIC L
0C8E KANNADA LETTER E
0C8F KANNADA LETTER EE
0C90 KANNADA LETTER AI
0C92 KANNADA LETTER O
0C93 KANNADA LETTER OO
0C94 KANNADA LETTER AU
0C95 KANNADA LETTER KA
0C96 KANNADA LETTER KHA
0C97 KANNADA LETTER GA
0C98 KANNADA LETTER GHA
0C99 KANNADA LETTER NGA
0C9A KANNADA LETTER CA
0C9B KANNADA LETTER CHA
0C9C KANNADA LETTER JA
0C9D KANNADA LETTER JHA
0C9E KANNADA LETTER NYA
0C9F KANNADA LETTER TTA
0CA0 KANNADA LETTER TTHA
0CA1 KANNADA LETTER DDA
0CA2 KANNADA LETTER DDHA
0CA3 KANNADA LETTER NNA
0CA4 KANNADA LETTER TA
0CA5 KANNADA LETTER THA
0CA6 KANNADA LETTER DA
0CA7 KANNADA LETTER DHA
0CA8 KANNADA LETTER NA
0CAA KANNADA LETTER PA
0CAB KANNADA LETTER PHA
0CAC KANNADA LETTER BA
0CAD KANNADA LETTER BHA
0CAE KANNADA LETTER MA
0CAF KANNADA LETTER YA
0CB0 KANNADA LETTER RA
0CB1 KANNADA LETTER RRA
0CB2 KANNADA LETTER LA
0CB3 KANNADA LETTER LLA
0CB5 KANNADA LETTER VA
0CB6 KANNADA LETTER SHA
0CB7 KANNADA LETTER SSA
0CB8 KANNADA LETTER SA
0CB9 KANNADA LETTER HA
0CBC KANNADA SIGN NUKTA
0CBD KANNADA SIGN AVAGRAHA
0CBE KANNADA VOWEL SIGN AA
0CBF KANNADA VOWEL SIGN I
0CC0 KANNADA VOWEL SIGN II
0CC1 KANNADA VOWEL SIGN U
0CC2 KANNADA VOWEL SIGN UU
0CC3 KANNADA VOWEL SIGN VOCALIC R
0CC4 KANNADA VOWEL SIGN VOCALIC RR
0CC6 KANNADA VOWEL SIGN E
0CC7 KANNADA VOWEL SIGN EE
0CC8 KANNADA VOWEL SIGN AI
0CCA KANNADA VOWEL SIGN O
0CCB KANNADA VOWEL SIGN OO
0CCC KANNADA VOWEL SIGN AU
0CCD KANNADA SIGN VIRAMA
0CD5 KANNADA LENGTH MARK
0CD6 KANNADA AI LENGTH MARK
0CDE KANNADA LETTER FA
0CE0 KANNADA LETTER VOCALIC RR
0CE1 KANNADA LETTER VOCALIC LL
0CE2 KANNADA VOWEL SIGN VOCALIC L
0CE3 KANNADA VOWEL SIGN VOCALIC LL
0CE6 KANNADA DIGIT ZERO
0CE7 KANNADA DIGIT ONE
0CE8 KANNADA DIGIT TWO
0CE9 KANNADA DIGIT THREE
0CEA KANNADA DIGIT FOUR
0CEB KANNADA DIGIT FIVE
0CEC KANNADA DIGIT SIX
0CED KANNADA DIGIT SEVEN
0CEE KANNADA DIGIT EIGHT
0CEF KANNADA DIGIT NINE
0CF1 KANNADA SIGN JIHVAMULIYA
0CF2 KANNADA SIGN UPADHMANIYA
0D02 MALAYALAM SIGN ANUSVARA
0D03 MALAYALAM SIGN VISARGA
0D05 MALAYALAM LETTER A
0D06 MALAYALAM LETTER AA
0D07 MALAYALAM LETTER I
0D08 MALAYALAM LETTER II
0D09 MALAYALAM LETTER U
0D0A MALAYALAM LETTER UU
0D0B MALAYALAM LETTER VOCALIC R
0D0C MALAYALAM LETTER VOCALIC L
0D0E MALAYALAM LETTER E
0D0F MALAYALAM LETTER EE
0D10 MALAYALAM LETTER AI
0D12 MALAYALAM LETTER O
0D13 MALAYALAM LETTER OO
0D14 MALAYALAM LETTER AU
0D15 MALAYALAM LETTER KA
0D16 MALAYALAM LETTER KHA
0D17 MALAYALAM LETTER GA
0D18 MALAYALAM LETTER GHA
0D19 MALAYALAM LETTER NGA
0D1A MALAYALAM LETTER CA
0D1B MALAYALAM LETTER CHA
0D1C MALAYALAM LETTER JA
0D1D MALAYALAM LETTER JHA
0D1E MALAYALAM LETTER NYA
0D1F MALAYALAM LETTER TTA
0D20 MALAYALAM LETTER TTHA
0D21 MALAYALAM LETTER DDA
0D22 MALAYALAM LETTER DDHA
0D23 MALAYALAM LETTER NNA
0D24 MALAYALAM LETTER TA
0D25 MALAYALAM LETTER THA
0D26 MALAYALAM LETTER DA
0D27 MALAYALAM LETTER DHA
0D28 MALAYALAM LETTER NA
0D2A MALAYALAM LETTER PA
0D2B MALAYALAM LETTER PHA
0D2C MALAYALAM LETTER BA
0D2D MALAYALAM LETTER BHA
0D2E MALAYALAM LETTER MA
0D2F MALAYALAM LETTER YA
0D30 MALAYALAM LETTER RA
0D31 MALAYALAM LETTER RRA
0D32 MALAYALAM LETTER LA
0D33 MALAYALAM LETTER LLA
0D34 MALAYALAM LETTER LLLA
0D35 MALAYALAM LETTER VA
0D36 MALAYALAM LETTER SHA
0D37 MALAYALAM LETTER SSA
0D38 MALAYALAM LETTER SA
0D39 MALAYALAM LETTER HA
0D3D MALAYALAM SIGN AVAGRAHA
0D3E MALAYALAM VOWEL SIGN AA
0D3F MALAYALAM VOWEL SIGN I
0D40 MALAYALAM VOWEL SIGN II
0D41 MALAYALAM VOWEL SIGN U
0D42 MALAYALAM VOWEL SIGN UU
0D43 MALAYALAM VOWEL SIGN VOCALIC R
0D44 MALAYALAM VOWEL SIGN VOCALIC RR
0D46 MALAYALAM VOWEL SIGN E
0D47 MALAYALAM VOWEL SIGN EE
0D48 MALAYALAM VOWEL SIGN AI
0D4A MALAYALAM VOWEL SIGN O
0D4B MALAYALAM VOWEL SIGN OO
0D4C MALAYALAM VOWEL SIGN AU
0D4D MALAYALAM SIGN VIRAMA
0D57 MALAYALAM AU LENGTH MARK
<filename>UserCode/John/ReWriteAcousticT0.py<gh_stars>1-10
from __future__ import division
import copy
import re
import numpy as np
import scipy.signal
from scipy import optimize
import matplotlib.pyplot as plt
def my_rms(arr):
#return np.sqrt(arr.dot(arr)/arr.size)
return np.std(arr)
def extend_window(w, r):
# Inputs:
# w: An array of 2 elements. Normally, this will be a window like [t1, t2]
# r: A float used as a ratio to extend w
# Outputs: A rescaled version of w
mp = 0.5*(w[1]+w[0]) # Midpoint
new_len = (w[1]-w[0])*(1+r) # Length of new window
return [mp-new_len/2, mp+new_len/2]
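# Illustrative example: extend_window([1.0, 3.0], 0.5) keeps the midpoint at 2.0
# and stretches the window length from 2.0 to 3.0, returning [0.5, 3.5].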
def freq_filter(freqs, lower=None, upper=None):
# Inputs:
# freqs: An array of frequency bins
# lower: The lower frequency to cut-off at
# upper: The upper frequency to cut-off at
# Outputs: An array of indices where the frequency in freqs is between lower and upper
if lower is None and upper is None:
return freqs
if lower is None:
return np.where([x <= upper for x in freqs])
if upper is None:
return np.where([x >= lower for x in freqs])
return np.where([lower <= x <= upper for x in freqs])
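# Illustrative example: freq_filter(np.array([10., 25., 40.]), lower=20., upper=30.)
# returns (array([1]),), i.e. the indices of the frequency bins inside [20, 30].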
def closest_index(arr, el):
# Inputs:
# arr: A 1-dimensional array
# el: Any element
# Outputs: The FIRST index of the item in arr that is closest to el.
# Notes: Arr does NOT have to be sorted.
return np.argmin(np.abs(arr-el))
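# Illustrative example: closest_index(np.array([0.0, 0.5, 1.0]), 0.6) returns 1,
# since 0.5 is the element closest to 0.6.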
def spectrum_sums(spectrum, fr, n, lowerf=None, upperf=None):
# Inputs:
# spectrum: The output 2d spectrum from a spectogram
# fr: A list of frequency bins corresponding to the spectrum
# n: Number of bins
# lowerf: The lower frequency to cut-off at
# upperf: The upper frequency to cut-off at
# Outputs: A compressed 1d array where each element is the sum of a bin from spectrum, only counting
# frequencies between lowerf and upperf
out = []
good_indices = freq_filter(fr, lowerf, upperf)
for subn in range(n):
out.append(np.trapz(spectrum[good_indices[0], subn], dx=np.mean(np.diff(fr))))
return out
def rescale_window(w1, w2):
# Inputs:
# w1: An array with 2 elements
# w2: An array with 2 elements
# Outputs: A rescaled version of w2 so that the endpoints of w2 match w1 but the number of elements remains the same
y1, y2 = min(w1), max(w1)
x1, x2 = min(w2), max(w2)
if x1 == x2:
return 0*w2
a = (y1-y2)/(x1-x2)
b = (x1*y2-x2*y1)/(x1-x2)
return a*w2+b
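# Illustrative example: rescale_window([0., 10.], np.array([2., 3., 4.])) linearly
# maps the endpoints of the second array onto [0, 10], giving array([ 0., 5., 10.]).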
def corr_signal(tau, dt, t0, n, fit_type=0, shift=10):
# Inputs:
# tau: Time constant on exponential decay
# dt: Step size for the x-axis
# t0: Where the exponential signal will start. Not important when used with correlation
# N: Number of points requested
# fit_type: The type of signal to create. See corr_signal_type_templates.py for a better explanation.
# fit_type = 0 --> Exponential decay
# fit_type = 1 --> Constant 1 followed by exponential decay (continuous)
# fit_type = 2 --> Linear increase followed by exponential decay
# fit_type = 3 --> Log increase followed by exponential decay
# fit_type = 4 --> 0 value followed by an exponential decrease. Discontinuous.
# Outputs:
# t: t-values for plotting
# y: y-values of our filter signal.
# After careful analysis, we've determined that the filtered piezo signal reaches a point that
# exhibits a sharp increase followed by an exponential decay. This function returns a brief exponential
# decay function for use with convolution/correlation.
shift = int(np.ceil(shift))
t = np.linspace(t0, t0+dt*n, n)
y = np.exp(-(t-t0)/tau)
ycopy = copy.deepcopy(y)
if fit_type == 0:
pass
elif fit_type == 1:
for subn in range(len(y) - shift):
y[subn+shift] = ycopy[subn]
y[0:shift] = 1
elif fit_type == 2:
for subn in range(len(y) - shift):
y[subn + shift] = ycopy[subn]
y[0:shift] = (t[0:shift] - t0)/(shift*dt)
elif fit_type == 3:
for subn in range(len(y) - shift):
y[subn + shift] = ycopy[subn]
y[0:shift] = np.log((t[0:shift] + 1 - t0)) / np.log(shift*dt + 1)
elif fit_type == 4:
for subn in range(len(y) - shift):
y[subn+shift] = ycopy[subn]
y[0:shift] = 0
return t, y
# Uncomment the below code if you want to test a new fit_type quickly to make sure the shape is what you want
# fit_type = 0
# t,y = corr_signal(1.5, 0.1, 0, 45, fit_type, 20) # <-- Uncomment to test different fit types
# plt.ioff()
# plt.plot(t,y)
# plt.show()
# 1/0 # <-- Just to stop the program here
def find_t0_from_corr(corrt, corry):
# Inputs:
# corrt: Time-values of the correlation signal
# corry: Y-values of the correlation signal
# Outputs: The time of the maximum of corry, considering only times where corrt < 0
# (entries at corrt >= 0 are zeroed out; note that corry is modified in place).
n = np.where(corrt >= 0)
corry[n] = 0
return corrt[np.argmax(corry)]
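# Illustrative example: find_t0_from_corr(np.array([-2., -1., 0., 1.]),
# np.array([1., 3., 5., 2.])) returns -1.0, the time of the largest correlation
# value occurring at a strictly negative time.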
def calculate_t0(piezo_waveform, piezo_timebase, led_on, tau,
lower=20000, upper=40000, piezo_fit_type=0):
# Inputs:
# piezo_waveform: A piezo waveform; generally this should have the LED pulses subtracted
# piezo_timebase: The times of each element in the piezo_waveform
# led_on: Array aligned with piezo_timebase indicating, for each sample, whether the LED was on
# tau: The time constant we are trying to fit to the exponential decay that occurs
# immediately after the bubble forms
# lower: The lower frequency threshold for cutting off the spectrogram
# upper: The upper frequency threshold for cutting off the spectrogram
# piezo_fit_type: The type of fit to use when trying to match the filtered piezo signal. Defaults to 0.
# Outputs: The acoustic t0 (a float), or np.nan if it could not be determined.
try:
timebase = piezo_timebase
textent = [min(timebase), max(timebase)]
dt = np.mean(np.diff(timebase))
fr, bn, sp = scipy.signal.spectrogram(piezo_waveform, fs=1./dt, nfft=512, noverlap=450,
mode="psd", window="hanning", nperseg=512)
n = len(bn)
sp_sums = spectrum_sums(sp, fr, n, lower, upper)
sp_sums = scipy.signal.medfilt(sp_sums)
rescaled_t = rescale_window(textent, bn)
corr_dt = np.mean(np.diff(rescaled_t))
corr_n = 1000
corr_t, corr_y = corr_signal(tau, corr_dt, rescaled_t[0], corr_n, fit_type=piezo_fit_type)
corr = np.correlate(sp_sums, corr_y, "same")
corr_t = rescaled_t - 0.5 * corr_n * corr_dt
test_t0 = find_t0_from_corr(corr_t, corr) # This is the t0 we begin to look backwards from
# Establish a baseline for our lookback algorithm
# But first we take the log of the [integrated] spectrogram signal
log_sp_sums = np.log(sp_sums)
first_on = np.argmax(led_on)
first_off = np.argmin(led_on[first_on:])
second_on = np.argmax(led_on[first_off:])
t_first_off = timebase[first_off]
t_second_on = timebase[second_on]
if not np.any(led_on):
return np.nan
rescaled_t_first_off_index = np.argmin(np.abs(rescaled_t - t_first_off))
rescaled_t_second_on_index = np.argmin(np.abs(rescaled_t - t_second_on))
baseline = np.average(log_sp_sums[rescaled_t_first_off_index+1:rescaled_t_second_on_index])
baseline_rms = my_rms(log_sp_sums[rescaled_t_first_off_index+1:rescaled_t_second_on_index])
test_t0_index = np.argmin(np.abs(rescaled_t - test_t0))
rescaled_dt = np.mean(np.diff(rescaled_t))
t_thresh = 100e-6
n_lookback = int(np.floor(t_thresh/rescaled_dt))
pts_lookbacked_sofar = 0
while True:
to_test = log_sp_sums[test_t0_index-n_lookback-pts_lookbacked_sofar:test_t0_index-pts_lookbacked_sofar]
if np.all(to_test<(baseline+5*baseline_rms)):
break
pts_lookbacked_sofar += 1
if test_t0_index-n_lookback-pts_lookbacked_sofar <= 0:
pts_lookbacked_sofar = -1
break
if pts_lookbacked_sofar != -1:
t0 = rescaled_t[test_t0_index-pts_lookbacked_sofar] + rescaled_dt/2
plt.ioff()
plt.plot(rescaled_t, log_sp_sums, color="b", zorder=1)
plt.axhline(baseline, color="r", zorder=2)
plt.axhline(baseline+5*baseline_rms, color="r", zorder=2)
plt.fill_between(rescaled_t, -18, baseline+5*baseline_rms, facecolor="red", alpha=0.3, zorder=10)
plt.axvline(t0, color="m", linewidth=4, zorder=3)
#plt.axvline(test_t0, color="r")
#plt.axvline(t_first_off, color="k")
#plt.axvline(t_second_on, color="k")
plt.xlabel("Time (ms)")
plt.ylabel("Log(Filtered Signal)")
plt.show()
else:
t0 = np.nan
return t0
except Exception as e:
return np.nan
def BandPass2(yd, f_low, f_high):
fband = np.array([f_low, f_high])
b, a = scipy.signal.butter(2, fband / (2.5e6 / 2.0), btype='bandpass', output='ba')
yd_f = scipy.signal.filtfilt(b, a, yd)
return yd_f
def CalcPiezoE(yd, td, t_wins, f_bins, t0):
piezoE = np.zeros((t_wins.shape[0],
f_bins.shape[0] - 1),
dtype=np.float64) + np.nan
if np.isnan(t0):
return piezoE
dt = td[1] - td[0]
t_wins = t_wins + t0
t_wins_ix = np.intp(np.round((t_wins - td[0]) / dt))
t_wins_ix[t_wins_ix < 0] = 0
t_wins_ix[t_wins_ix > td.shape[0]] = td.shape[0]
for i_win in range(t_wins.shape[0]):
this_yd = yd[t_wins_ix[i_win][0]:t_wins_ix[i_win][1]]
if len(this_yd) < 2:
continue
fft_amp = np.fft.rfft(this_yd)
fft_pow = (np.abs(fft_amp) ** 2) * dt / len(this_yd)
df = 1 / (dt * len(this_yd))
fd = df * (np.arange(len(fft_amp), dtype=np.float64) + 1)
f_bins_ix = np.intp(np.round((f_bins / df) - 1))
f_bins_ix[f_bins_ix < 0] = 0
f_bins_ix[f_bins_ix > len(fft_amp)] = len(fft_amp)
fft_en = fft_pow * (fd ** 2)
for i_f in range(len(f_bins) - 1):
piezoE[i_win, i_f] = df *\
np.sum(fft_en[f_bins_ix[i_f]:f_bins_ix[i_f + 1]])
return piezoE
def AcousticAnalysis(ev, tau, piezo_fit_type=0,
f_high=np.float64(40e3), f_low=np.float64(6e3),
led_amp=np.float64(-0.1), led_tau=np.float64(2e-4),
bs_win=np.float64([-0.15, -0.12]),
t0_win=np.float64([-0.12, 0]),
meansamp=np.intp(1e4), notbs_win=np.float64(2e-4),
t_wins=np.float64([[[-2e-2, -1e-2],
[-1e-3, 9e-3],
[-2e-4, 4e-3]],
[[-2e-2, -1e-2],
[-1e-3, 9e-3],
[-2e-4, 4e-3]]], ),
f_bins=np.float64([[1e2, 1e3, 1e4, 1e5],
[1e2, 1e3, 1e4, 1e5]]),
corr_lowerf=20000, corr_upperf=40000):
# Inputs:
# ev: Event data (from GetEvent)
# tau: The expected time-constant of the exponential decay from the filtered piezo signal
# piezo1_fit_type: See corr_signal_types.py
cos(Φ) = cos(ϕ) * cos(δ) * cos(ω) + sin(ϕ) * sin(δ)
where:
Φ - solar zenith angle [rad]
ϕ - latitude [rad]
δ - solar declination [rad]
ω - solar time angle [rad]
Parameters
----------
dt : numpy.datetime64
Moment.
lat : float
Decimal latitude in degrees.
lon : float
Decimal longitude in degrees.
Returns
-------
float
Solar zenith angle in radians.
'''
hour_angle = solar_time_angle(dt, lon)
phi = np.radians(lat)
declination = solar_declination(dt)
return np.arccos((np.cos(phi) * np.cos(declination) * np.cos(hour_angle)) + (np.sin(phi) * np.sin(declination)))
def instantaneous_exoatmospheric_irradiance(dt, lat, lon):
'''
Calculates the exoatmospheric solar irradiance over a horizontal surface at a
given moment.
The exoatmospheric irradiance is given by
Gsc * (1 / dr) * cos(Φ)
where:
Gsc: solar constant (1366 W/m²)
dr: relative earth sun distance
Φ: solar zenith angle
Parameters
----------
dt : numpy.datetime64
Date and time UTC.
lat : float
Latitude (decimal degrees).
lon : float
Longitude (decimal degrees).
Returns
-------
float
instantaneous_exoatmospheric_irradiance [W/m²].
'''
dr = 1 / earth_sun_distance(dt)
sz = solar_zenith_angle(dt, lat, lon)
return np.fmax(1366 * dr * np.cos(sz), 0)
def net_shortwave_radiation(rs, albedo = 0.23):
'''
Calculates net shortwave radiation as the difference between incoming
shortwave radiation and reflected shortwave radiation on a horizontal
surface.
Parameters
----------
rs : float
Incoming shortwave radiation on a horizontal surface [energy / time / area].
albedo : float
Albedo of the horizontal surface [adimensional]. The default value is 0.23,
which is the albedo of the reference crop for calculation of reference
evapotranspiration by means of the standardized Penman-Monteith FAO
equation.
Returns
-------
float
The difference between incoming and reflected shortwave radiation (same unit as rs).
'''
return (1 - albedo) * rs
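# Illustrative example: with rs = 20e6 J/m²/day and the default grass albedo of
# 0.23, net_shortwave_radiation(20e6) returns 0.77 * 20e6 = 15.4e6 J/m²/day.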
def cloud_function(rs, rs0, se):
'''
Calculates the cloud-dependant part of the equation for net
longwave radiation.
Parameters
----------
rs : float
Solar global irradiance over a horizontal surface [J / m²]
rs0 : float
Solar global irradiance over a horizontal surface reaching the top of atmosphere [J / m²]
se : float
Solar elevation angle [rad]
'''
rr = np.clip(rs / np.where(se > 0.3, rs0, 1), 0.3, 1)
return 1.35 * rr - 0.35
def net_longwave_radiation(tmax, tmin, ea, cf = None, hourly = False):
'''
Calculates net longwave radiation for daily or hourly periods.
Parameters
----------
tmax : float
Maximum air temperature [K].
tmin : float
Minimum air temperature [K].
ea : float
Water vapour pressure [Pa].
cf : float, optional
Cloudiness factor, e.g. the value returned by cloud_function from the ratio
of measured to clear-sky solar radiation. The default is None.
hourly : bool, optional
If True, the radiation is integrated over an hourly period; otherwise a
daily period is assumed. The default is False.
Returns
-------
float
net longwave radiation [J/m**2].
'''
# p1 - emission of longwave radiation by air
# p2 - effect of water vapour
# cf - effect of cloudiness
mult = 3600 if (hourly) else 86400
p1 = mult * (stefan_boltzmann_law(tmax) + stefan_boltzmann_law(tmin)) / 2
p2 = 0.34 - 0.004427188724235732 * np.sqrt(ea)
return -(p1 * p2 * cf)
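# Clear-sky solar radiation for a station at elevation z [m], given the
# extraterrestrial radiation ra, using the FAO-56 style approximation
# Rso = (0.75 + 2e-5 * z) * Ra.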
def clear_sky_radiation(z, ra):
return (0.75 + 2e-5 * z) * ra
def latent_heat_of_vaporization(temp):
'''
Calculates the latent heat of vaporization of water as a function of
temperature.
Parameters
----------
temp : float
Temperature [K].
Returns
-------
float
latent heat of vaporization of water [J/kg].
'''
return 3145907.15 - 2361 * temp
def atmospheric_pressure(z, temp = 293.15, lb = 6.5e-3):
"""
Calculates atmospheric pressure at a given height.
Parameters
----------
z : float
Altitude above sea level [m].
temp : float, optional
Mean atmospheric temperature [K]. The default is 293.15.
lb : float, optional
Temperature lapse rate [K / m] (i.e. how many Kelvin the temperature of air decreases
with a 1 m increase in altitude). The default is 6.5e-3 K / m.
Returns
-------
float
Atmospheric pressure at altitude z.
"""
p0, g, rd = 101325, 9.80665, 287.058
power = -g / (rd * lb)
return p0 * ((temp + lb * z) / temp) ** power
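# Illustrative example: atmospheric_pressure(0.) returns 101325 Pa at sea level;
# the returned pressure then decreases with altitude following the barometric
# formula above.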
def log_wind_profile(u1, z2 = 2, z1 = 10, d = 0.084, z0 = 0.01476):
"""
Estimates wind speed at height z2 based on wind speed measued at
height z1, given the height of the zero plane displacement and the roughness
length of the surface. For a standardized FAO Penman-Monteith ET0 surface
you can use the default values for d and z0. If the wind speed is measured
at a standard weather station, which measures wind speed
at a 10m height, you can use the default value for z1.
Parameters
----------
u1 : float
Wind speed [m/s] measured at height z1.
z2 : float
Height z2 [m].
z1 : float, optional
Height z1 [m]. The default is 10.
d : float, optional
Zero plane displacement height. If not set, a default value of 0.084 m is used, which
is the zero plane displacement height estimated for a 0.12m height uniform crop.
z0 : float, optional
Roughness length. If not set, a default value of 0.01476 will be set, which
corresponds to the roughness length of the standardized FAO ET0 Penman-Monteith
equation.
Returns
-------
float
Wind speed at height z2.
"""
return u1 * np.log((z2 - d) / z0) / np.log((z1 - d) / z0)
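# Illustrative example: log_wind_profile(3.0) rescales a 3 m/s wind measured at
# the standard 10 m height to the 2 m height used by the FAO Penman-Monteith
# equation, giving roughly 0.75 * 3.0 ≈ 2.2 m/s with the default d and z0.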
def air_density(temp, patm, pw = 0):
"""
Calculates the density of dry air by means of the universal gas law as a
function of air temperature and atmospheric pressure.
m / V = [Pw / (Rw * T)] + [Pd / (Rd * T)]
where:
Pd: Patm - Pw
Rw: specific gas constant for water vapour [Rw = 461.495 J/(kg K)]
Rd: specific gas constant for dry air [Rd = 287.058 J/(kg K)]
T: air temperature [K]
m/V: density of air [kg/m³]
Parameters
----------
temp : float
Air temperature [K].
patm : float
Atmospheric pressure [Pa].
pw : float
Vapour pressure [Pa]. Default to 0 Pa (dry air).
Returns
-------
float
Air density [kg/m³].
"""
rd, rw = 287.058, 461.495 # specific gas constant for dry air and water vapour [J / (kg K)]
pd = patm - pw
return (pd / (rd * temp)) + (pw / (rw * temp))
def absolute_humidity(ea, temp):
'''
Calculates absolute humidity from partial pressure of water vapour and air
temperature.
Parameters
----------
ea : float
Partial pressure of water vapour [Pa].
temp : float
Absolute temperature [K].
Returns
-------
float
Absolute humidity [kg / m**3].
'''
rw = 461.495 # specific gas constant for water vapour [J / (kg K)]
return ea / (rw * temp)
def specific_humidity(patm, ea, temp):
'''
Calculates specific humidity of air.
Parameters
----------
patm : float
Atmospheric pressure [Pa].
ea : float
Partial pressure of water vapour [Pa].
temp : float
Absolute temperature [K].
Returns
-------
float
Specific humidity [kg / kg].
'''
rho = air_density(temp, patm, pw = ea)
hum = absolute_humidity(ea, temp)
return hum / rho
def vapour_pressure_from_absolute_humidity(h, temp):
"""
Calculates the partial pressure of water vapour for a given absolute air
humidity and temperature.
Parameters
----------
h : float
absolute humidity [kg/m³].
temp : float
air temperature [K].
Returns
-------
float
Partial pressure of water vapour [Pa].
"""
rw = 461.495 # specific gas constant for water vapour [J / (kg K)]
return h * temp * rw
def vapour_pressure_from_specific_humidity(q, patm, temp, max_iter = 20):
"""
Returns the partial pressure of water vapour for a given specific air
humidity and atmospheric condition.
Parameters
----------
q : float
specific humidity of air [kg/kg].
patm : float
atmospheric pressure [Pa].
temp : float
air temperature [K].
max_iter : integer, optional
max number of iterations until it stops. The default is 20.
Returns
-------
pw : float
Vapour pressure [Pa] for
>= 2:
print('build_schedule(): Adding input variable hook: ',
in_var_hook)
print('For input variable: ', ar)
# Create post-run hooks for any arrays that are dynamically
# allocated inside the schedule.
if unique_array_index in self.dynamically_allocated_unique_index:
if key in self.array_id_to_param_map:
param_attribute_location = self.array_id_to_param_map[key]
param_hook = (unique_array_index, param_attribute_location)
self.param_post_hooks.append(param_hook)
if self.verbosity_level >= 2:
print('self.param_hooks: ', self.param_hooks)
self.debug_print_unique_arrays_info()
# todo: We can potentially reduce memory usage by freeing memory
# of intermediate arrays in self.unique_arrays
# once they are no longer needed in the schedule or by
# parameters.
print('end of build_schedule()')
self.schedule_built = True
def forward(self, inputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.forward()...')
# Note: This method will be invoked every iteration starting from the second
# iteration, because the corresponding define-by-run code runs instead during
# the first iteration.
if not self.schedule_built:
raise RuntimeError('forward() was called before '
'build_schedule()!')
self.run_param_pre_hooks()
self.run_in_var_hooks(inputs)
if self.verbosity_level >= 2:
print('Running static schedule...')
# Run each function in the static schedule.
for x in self.schedule_info_list:
x()
if self.verbosity_level >= 2:
self.debug_print_unique_arrays_info()
self.run_out_var_hooks()
self.run_param_post_hooks()
ret = []
for y in self.out_vars:
if y is None or y.data is None:
ret.append(None)
else:
# todo: add test case for an example where the following
# copy is required (evaluation mode, repeated calls of
# chain that reuse same schedule).
ret.append(y.data.copy())
return tuple(ret)
def backward(self, target_input_indexes, grad_outputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.backward()...')
# The first time this method is called, the define-by-run code is
# executed in order to create a static schedule.
self.schedule_manager.end_forward()
if self.backward_schedule_func is None:
print('Creating new backward schedule...')
# Create backward schedule and run define-by-run backward code.
self.backward_schedule_func = self.get_contained_schedule()
# Make local copies of the variables in grad_outputs.
new_grad_outputs = []
for var in grad_outputs:
# Replace each input variable with a new variable having
# the same data.
new_grad_outputs.append(chainer.Variable(var.data))
with chainer.using_config('schedule_func',
self.backward_schedule_func):
with chainer.using_config('enable_backprop', True):
for ind, var in enumerate(new_grad_outputs):
# todo: possibly don't need the following:
self.out_vars[ind].grad = new_grad_outputs[ind].data
inputs = [param for param in self.chain.params()]
for var in self.in_vars:
inputs.append(var)
# Need shorter var to avoid "line too long error"
ugh = self.enable_double_backprop
chainer.grad(self.out_vars,
inputs,
grad_outputs=new_grad_outputs,
set_grad=True,
enable_double_backprop=ugh)
# We no longer need the backward graph from self.out_vars, so
# unchain them.
# todo (vogel): enable this eventually. For now, it
# causes some needed variables to be set to None
# in some models such as CIFAR example.
# for var in self.out_vars:
# var.unchain_backward()
# Note: var.grad_var is allowed to be None below:
backward_out_vars = [var.grad_var for var in self.in_vars]
self.backward_schedule_func.set_out_variables(backward_out_vars)
for n in range(len(self.in_vars)):
self.in_vars[n] = None
if self.verbosity_level >= 2:
print('building backward schedule.')
self.backward_schedule_func.build_schedule(self.chain,
new_grad_outputs)
return self.backward_schedule_func.apply(grad_outputs)
class ScheduleManager(object):
"""A manager of static schedules for a static chain.
This is a container of the static schedules that are used by a static
chain.
Args:
minimize_cache_size (bool): If `True`, attempt to reduce memory
usage by clearing the cached schedules whenever the training
mode changes (that is, whenever `chainer.config.train` changes
value) or whenever the mini-batch size changes.
"""
def __init__(self, minimize_cache_size=True, verbosity_level=0):
# Maps a key string to a list of schedule functions.
self.schedules = dict()
self.minimize_cache_size = minimize_cache_size
self.in_use_count = dict()
self.forward_over = False
self.prev_train_config = None
self.max_in_use_train = 0
self.train_count = 0
self.verbosity_level = verbosity_level
def get_schedule(self, in_vars, enable_double_backprop=False):
"""Get a static schedule.
Return a static schedule object (that is, an instance of
``StaticScheduleFunction``) that is compatible with
the current configuration and input variables to the supplied chain.
If there is no existing schedule available, return an empty schedule
object.
During the usual "training mode" (that is, when both
`chainer.config.enable_backprop` and `chainer.config.train`
are `True`), this method will always return a distinct static
schedule each time it is called within the same iteration.
It will also try to reuse
existing schedules across iterations. Therefore, any schedule that
is returned in a given iteration cannot be returned again until
the following iteration. However, if either of these flags is
'False', then this method may return the same schedule instance
multiple times within the same iteration, as long as it is
compatible with `in_vars`.
Note that in order to implement the above behavior, the schedule
manager must be informed when the current iteration has finished.
This is accomplished by calling `end_forward()` after the
iteration has finished. If a backward pass is performed, then
`end_forward()` will be automatically called. Otherwise, it
will not be called and the user will be responsible for calling
it.
Args:
in_vars (tuple of :class:`~chainer.Variable`): The input
variables to the chain.
Returns:
An instance of ``StaticScheduleFunction``.
"""
if self.forward_over:
self.forward_over = False
if self.minimize_cache_size:
if chainer.config.train != self.prev_train_config:
# Training config changed, so clear caches.
self.prev_train_config = chainer.config.train
if self.verbosity_level >= 2:
print("Clearing schedule cache...")
self.schedules.clear()
self.in_use_count.clear()
if (chainer.config.train is False or
chainer.config.enable_backprop is False):
key_str = 'test:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
# If the maximum number of in-use schedules in any iteration
# during training mode was exactly 1, assume it should also
# be 1 for test mode.
if key_str in self.schedules:
sched_list = self.schedules[key_str]
sched = sched_list[0]
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
return sched
else:
key_str = 'train:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
self.train_count += 1
if key_str in self.schedules:
sched_list = self.schedules[key_str]
available_index = self.in_use_count[key_str]
if available_index >= len(sched_list):
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
sched_list.append(sched)
sched = sched_list[available_index]
self.in_use_count[key_str] = available_index + 1
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
self.in_use_count[key_str] = 1
return sched
def end_forward(self):
"""Make in-use schedules available for use in next iteration.
Set the in-use status of all schedules to "not in use" so that
they can be reused in the next iteration.
If test mode is active (`chainer.config.train` is `False`) and the static
chain corresponding to this manager was not called more than once in any
iteration during training mode, this method will be called automatically.
"""
if not self.forward_over:
for key in self.in_use_count:
self.in_use_count[key] = 0
self.forward_over = True
if self.train_count > self.max_in_use_train:
self.max_in_use_train = self.train_count
if self.verbosity_level >= 2:
print("Maximum in-use schedules per training iteration: ",
self.max_in_use_train)
self.train_count = 0
def __repr__(self):
out = "ScheduleManager:\n"
for key_str in self.schedules:
out += "key string: " + key_str
sched_list = self.schedules[key_str]
out += " -> schedule list of length: " + \
str(len(sched_list)) + '\n'
for sched in sched_list:
out += str(sched)
return out
def static_graph(*args, **kwargs):
"""Decorator to mark a Chain's ``__call__()`` as a static sub-graph.
This decorator marks the define-by-run code inside the `__call__()`
method of a Chain instance as corresponding to a static computation
graph or sub-graph. Such a chain will be referred to as a 'static chain'.
This allows various "static graph" optimizations to be performed, which
can result in significant speedups for some models.
When this decorator is used, the chain's define-by-run code executes
during the first iteration as usual. However, while the define-by-run
code is executing, a trace is also performed to incrementally create a
corresponding static schedule. This static schedule will only contain
the subset of the computations inside the define-by-run code that actually
needs to run every iteration. Specifically, this will contain the code
inside any functions called that were annotated with the `@static_code`
decorator, which will include all Chainer built-in functions, as well as
any user-defined functions that use `@static_code`. Then, starting
from the second iteration, when the static chain is called, its
static schedule code will be executed instead of its define-by-run code.
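A minimal usage sketch is shown below (the chain, layer size, and input
shape are assumptions chosen only for illustration)::
import numpy as np
import chainer
import chainer.links as L
class StaticNet(chainer.Chain):
    def __init__(self):
        super(StaticNet, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, 10)
    @static_graph
    def __call__(self, x):
        return self.l1(x)
net = StaticNet()
y = net(np.ones((4, 5), dtype=np.float32))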
However, the user must also be careful of the following:
- The user is responsible for applying this decorator correctly. The
framework
does
0.1*m.x4976 - 0.1*m.x4977 - 0.1*m.x4978 - 0.1*m.x4979
- 0.1*m.x4980 - 0.1*m.x4981 - 0.1*m.x4982 - 0.1*m.x4983 - 0.1*m.x4984 - 0.1*m.x4985
- 0.1*m.x4986 - 0.1*m.x4987 - 0.1*m.x4988 - 0.1*m.x4989 - 0.1*m.x4990 - 0.1*m.x4991
- 0.1*m.x4992 - 0.1*m.x4993 - 0.1*m.x4994 - 0.1*m.x4995 - 0.1*m.x4996 - 0.1*m.x4997
- 0.1*m.x4998 - 0.1*m.x4999 - 0.1*m.x5000 - 0.1*m.x5001 - 0.1*m.x5002 - 0.1*m.x5003
- 0.1*m.x5004 - 0.1*m.x5005 - 0.1*m.x5006 - 0.1*m.x5007 - 0.1*m.x5008 - 0.1*m.x5009
- 0.1*m.x5010 - 0.1*m.x5011 - 0.1*m.x5012 - 0.1*m.x5013 - 0.1*m.x5014 - 0.1*m.x5015
- 0.1*m.x5016 - 0.003*m.x5017 - 0.003*m.x5018 - 0.003*m.x5019 - 0.003*m.x5020 - 0.003*m.x5021
- 0.003*m.x5022 - 0.003*m.x5023 - 0.003*m.x5024 - 0.003*m.x5025 - 0.003*m.x5026 - 0.003*m.x5027
- 0.003*m.x5028 - 0.003*m.x5029 - 0.003*m.x5030 - 0.003*m.x5031 - 0.003*m.x5032 - 0.003*m.x5033
- 0.003*m.x5034 - 0.003*m.x5035 - 0.003*m.x5036 - 0.003*m.x5037 - 0.003*m.x5038 - 0.003*m.x5039
- 0.003*m.x5040 - 0.003*m.x5041 - 0.003*m.x5042 - 0.003*m.x5043 - 0.003*m.x5044 - 0.003*m.x5045
- 0.003*m.x5046 - 0.003*m.x5047 - 0.003*m.x5048 - 0.003*m.x5049 - 0.003*m.x5050 - 0.003*m.x5051
- 0.003*m.x5052 - 0.003*m.x5053 - 0.003*m.x5054 - 0.003*m.x5055 - 0.003*m.x5056 - 0.003*m.x5057
- 0.003*m.x5058 - 0.003*m.x5059 - 0.003*m.x5060 - 0.003*m.x5061 - 0.003*m.x5062 - 0.003*m.x5063
- 0.003*m.x5064 - 0.003*m.x5065 - 0.003*m.x5066 - 0.003*m.x5067 - 0.003*m.x5068 - 0.003*m.x5069
- 0.003*m.x5070 - 0.003*m.x5071 - 0.003*m.x5072 - 0.003*m.x5073 - 0.003*m.x5074 - 0.003*m.x5075
- 0.003*m.x5076 - 0.003*m.x5077 - 0.003*m.x5078 - 0.003*m.x5079 - 0.003*m.x5080 - 0.003*m.x5081
- 0.003*m.x5082 - 0.003*m.x5083 - 0.003*m.x5084 - 0.003*m.x5085 - 0.003*m.x5086 - 0.003*m.x5087
- 0.003*m.x5088 - 0.003*m.x5089 - 0.003*m.x5090 - 0.003*m.x5091 - 0.003*m.x5092 - 0.003*m.x5093
- 0.003*m.x5094 - 0.003*m.x5095 - 0.003*m.x5096 - 0.003*m.x5097 - 0.003*m.x5098 - 0.003*m.x5099
- 0.003*m.x5100 - 0.003*m.x5101 - 0.003*m.x5102 - 0.003*m.x5103 - 0.003*m.x5104 - 0.003*m.x5105
- 0.003*m.x5106 - 0.003*m.x5107 - 0.003*m.x5108 - 0.003*m.x5109 - 0.003*m.x5110 - 0.003*m.x5111
- 0.003*m.x5112 - 0.003*m.x5113 - 0.003*m.x5114 - 0.003*m.x5115 - 0.003*m.x5116 - 0.003*m.x5117
- 0.003*m.x5118 - 0.003*m.x5119 - 0.003*m.x5120 - 0.003*m.x5121 - 0.003*m.x5122 - 0.003*m.x5123
- 0.003*m.x5124 - 0.003*m.x5125 - 0.003*m.x5126 - 0.003*m.x5127 - 0.003*m.x5128 - 0.003*m.x5129
- 0.003*m.x5130 - 0.003*m.x5131 - 0.003*m.x5132 - 0.003*m.x5133 - 0.003*m.x5134 - 0.003*m.x5135
- 0.003*m.x5136 - 0.003*m.x5137 - 0.003*m.x5138 - 0.003*m.x5139 - 0.003*m.x5140 - 0.003*m.x5141
- 0.003*m.x5142 - 0.003*m.x5143 - 0.003*m.x5144 - 0.003*m.x5145 - 0.003*m.x5146 - 0.003*m.x5147
- 0.003*m.x5148 - 0.003*m.x5149 - 0.003*m.x5150 - 0.003*m.x5151 - 0.003*m.x5152 - 0.003*m.x5153
- 0.003*m.x5154 - 0.003*m.x5155 - 0.003*m.x5156 - 0.003*m.x5157 - 0.003*m.x5158 - 0.003*m.x5159
- 0.003*m.x5160 - 0.003*m.x5161 - 0.003*m.x5162 - 0.003*m.x5163 - 0.003*m.x5164 - 0.003*m.x5165
- 0.003*m.x5166 - 0.003*m.x5167 - 0.003*m.x5168 - 0.003*m.x5169 - 0.003*m.x5170 - 0.003*m.x5171
- 0.003*m.x5172 - 0.003*m.x5173 - 0.003*m.x5174 - 0.003*m.x5175 - 0.003*m.x5176 - 0.003*m.x5177
- 0.003*m.x5178 - 0.003*m.x5179 - 0.003*m.x5180 - 0.003*m.x5181 - 0.003*m.x5182 - 0.003*m.x5183
- 0.003*m.x5184 - 0.003*m.x5185 - 0.003*m.x5186 - 0.003*m.x5187 - 0.003*m.x5188 - 0.003*m.x5189
- 0.003*m.x5190 - 0.003*m.x5191 - 0.003*m.x5192 - 0.003*m.x5193 - 0.003*m.x5194 - 0.003*m.x5195
- 0.003*m.x5196 - 0.003*m.x5197 - 0.003*m.x5198 - 0.003*m.x5199 - 0.003*m.x5200 - 0.003*m.x5201
- 0.003*m.x5202 - 0.003*m.x5203 - 0.003*m.x5204 - 0.003*m.x5205 - 0.003*m.x5206 - 0.003*m.x5207
- 0.003*m.x5208 - 0.003*m.x5209 - 0.003*m.x5210 - 0.003*m.x5211 - 0.003*m.x5212 - 0.003*m.x5213
- 0.003*m.x5214 - 0.003*m.x5215 - 0.003*m.x5216 - 0.003*m.x5217 - 0.003*m.x5218 - 0.003*m.x5219
- 0.003*m.x5220 - 0.003*m.x5221 - 0.003*m.x5222 - 0.003*m.x5223 - 0.003*m.x5224 - 0.003*m.x5225
- 0.003*m.x5226 - 0.003*m.x5227 - 0.003*m.x5228 - 0.003*m.x5229 - 0.003*m.x5230 - 0.003*m.x5231
- 0.003*m.x5232 - 0.003*m.x5233 - 0.003*m.x5234 - 0.003*m.x5235 - 0.003*m.x5236 - 0.003*m.x5237
- 0.003*m.x5238 - 0.003*m.x5239 - 0.003*m.x5240 - 0.003*m.x5241 - 0.003*m.x5242 - 0.003*m.x5243
- 0.003*m.x5244 - 0.003*m.x5245 - 0.003*m.x5246 - 0.003*m.x5247 - 0.003*m.x5248 - 0.003*m.x5249
- 0.003*m.x5250 - 0.003*m.x5251 - 0.003*m.x5252 - 0.003*m.x5253 - 0.003*m.x5254 - 0.003*m.x5255
- 0.003*m.x5256 - 0.003*m.x5257 - 0.003*m.x5258 - 0.003*m.x5259 - 0.003*m.x5260 - 0.003*m.x5261
- 0.003*m.x5262 - 0.003*m.x5263 - 0.003*m.x5264 - 0.003*m.x5265 - 0.003*m.x5266 - 0.003*m.x5267
- 0.003*m.x5268 - 0.003*m.x5269 - 0.003*m.x5270 - 0.003*m.x5271 - 0.003*m.x5272 - 0.003*m.x5273
- 0.003*m.x5274 - 0.003*m.x5275 - 0.003*m.x5276 - 0.003*m.x5277 - 0.003*m.x5278 - 0.003*m.x5279
- 0.003*m.x5280 - 0.003*m.x5281 - 0.003*m.x5282 - 0.003*m.x5283 - 0.003*m.x5284 - 0.003*m.x5285
- 0.003*m.x5286 - 0.003*m.x5287 - 0.003*m.x5288 - 0.003*m.x5289 - 0.003*m.x5290 - 0.003*m.x5291
- 0.003*m.x5292 - 0.003*m.x5293 - 0.003*m.x5294 - 0.003*m.x5295 - 0.003*m.x5296 - 0.003*m.x5297
- 0.003*m.x5298 - 0.003*m.x5299 - 0.003*m.x5300 - 0.003*m.x5301 - 0.003*m.x5302 - 0.003*m.x5303
- 0.003*m.x5304 - 0.003*m.x5305 - 0.003*m.x5306 - 0.003*m.x5307 - 0.003*m.x5308 - 0.003*m.x5309
- 0.003*m.x5310 - 0.003*m.x5311 - 0.003*m.x5312 - 0.003*m.x5313 - 0.003*m.x5314 - 0.003*m.x5315
- 0.003*m.x5316 - 0.003*m.x5317 - 0.003*m.x5318 - 0.003*m.x5319 - 0.003*m.x5320 - 0.003*m.x5321
- 0.003*m.x5322 - 0.003*m.x5323 - 0.003*m.x5324 - 0.003*m.x5325 - 0.003*m.x5326 - 0.003*m.x5327
- 0.003*m.x5328 - 0.003*m.x5329 - 0.003*m.x5330 - 0.003*m.x5331 - 0.003*m.x5332 - 0.003*m.x5333
- 0.003*m.x5334 - 0.003*m.x5335 - 0.003*m.x5336 - 0.003*m.x5337 - 0.003*m.x5338 - 0.003*m.x5339
- 0.003*m.x5340 - 0.003*m.x5341 - 0.003*m.x5342 - 0.003*m.x5343 - 0.003*m.x5344 - 0.003*m.x5345
- 0.003*m.x5346 - 0.003*m.x5347 - 0.003*m.x5348 - 0.003*m.x5349 - 0.003*m.x5350 - 0.003*m.x5351
- 0.003*m.x5352 - 0.003*m.x5353 - 0.003*m.x5354 - 0.003*m.x5355 - 0.003*m.x5356 - 0.003*m.x5357
- 0.003*m.x5358 - 0.003*m.x5359 - 0.003*m.x5360 - 0.003*m.x5361 - 0.003*m.x5362 - 0.003*m.x5363
- 0.003*m.x5364 - 0.003*m.x5365 - 0.003*m.x5366 - 0.003*m.x5367 - 0.003*m.x5368 - 0.003*m.x5369
- 0.003*m.x5370 - 0.003*m.x5371 - 0.003*m.x5372 - 0.003*m.x5373 - 0.003*m.x5374 - 0.003*m.x5375
- 0.003*m.x5376 - 0.003*m.x5377 - 0.003*m.x5378 - 0.003*m.x5379 - 0.003*m.x5380 - 0.003*m.x5381
- 0.003*m.x5382 - 0.003*m.x5383 - 0.003*m.x5384 - 0.003*m.x5385 - 0.003*m.x5386 - 0.003*m.x5387
- 0.003*m.x5388 - 0.003*m.x5389 - 0.003*m.x5390 - 0.003*m.x5391 - 0.003*m.x5392 - 0.003*m.x5393
- 0.003*m.x5394 - 0.003*m.x5395 - 0.003*m.x5396 - 0.003*m.x5397 - 0.003*m.x5398 - 0.003*m.x5399
- 0.003*m.x5400 - 0.003*m.x5401 - 0.003*m.x5402 - 0.003*m.x5403 - 0.003*m.x5404 - 0.003*m.x5405
- 0.003*m.x5406 - 0.003*m.x5407 - 0.003*m.x5408 - 0.003*m.x5409 - 0.003*m.x5410 - 0.003*m.x5411
- 0.003*m.x5412 - 0.003*m.x5413 - 0.003*m.x5414 - 0.003*m.x5415 - 0.003*m.x5416 - 0.003*m.x5417
- 0.003*m.x5418 - 0.003*m.x5419 - 0.003*m.x5420 - 0.003*m.x5421 - 0.003*m.x5422 - 0.003*m.x5423
- 0.003*m.x5424 - 0.003*m.x5425 - 0.003*m.x5426 - 0.003*m.x5427 - 0.003*m.x5428 - 0.003*m.x5429
- 0.003*m.x5430 - 0.003*m.x5431 - 0.003*m.x5432 - 0.003*m.x5433 - 0.003*m.x5434 - 0.003*m.x5435
- 0.003*m.x5436 - 0.003*m.x5437 - 0.003*m.x5438 - 0.003*m.x5439 - 0.003*m.x5440 - 0.003*m.x5441
- 0.003*m.x5442 - 0.003*m.x5443 - 0.003*m.x5444 - 0.003*m.x5445 - 0.003*m.x5446 - 0.003*m.x5447
- 0.003*m.x5448 - 0.003*m.x5449 - 0.003*m.x5450 - 0.003*m.x5451 - 0.003*m.x5452 - 0.003*m.x5453
- 0.003*m.x5454 - 0.003*m.x5455 - 0.003*m.x5456 - 0.003*m.x5457 - 0.003*m.x5458 - 0.003*m.x5459
- 0.003*m.x5460 - 0.003*m.x5461 - 0.003*m.x5462 - 0.003*m.x5463 - 0.003*m.x5464 - 0.003*m.x5465
- 0.003*m.x5466 - 0.003*m.x5467 - 0.003*m.x5468 - 0.003*m.x5469 - 0.003*m.x5470 - 0.003*m.x5471
- 0.003*m.x5472 - 0.003*m.x5473 - 0.003*m.x5474 - 0.003*m.x5475 - 0.003*m.x5476 - 0.003*m.x5477
- 0.003*m.x5478 - 0.003*m.x5479 - 0.003*m.x5480 - 0.003*m.x5481 - 0.003*m.x5482 - 0.003*m.x5483
- 0.003*m.x5484 - 0.003*m.x5485 - 0.003*m.x5486 - 0.003*m.x5487 - 0.003*m.x5488 - 0.003*m.x5489
- 0.003*m.x5490 - 0.003*m.x5491 - 0.003*m.x5492 - 0.003*m.x5493 - 0.003*m.x5494 - 0.003*m.x5495
- 0.003*m.x5496 - 0.003*m.x5497 - 0.003*m.x5498 - 0.003*m.x5499 - 0.003*m.x5500 - 0.003*m.x5501
- 0.003*m.x5502 - 0.003*m.x5503 - 0.003*m.x5504 - 0.003*m.x5505 - 0.003*m.x5506 - 0.003*m.x5507
- 0.003*m.x5508 - 0.003*m.x5509 - 0.003*m.x5510 - 0.003*m.x5511 - 0.003*m.x5512 - 0.003*m.x5513
- 0.003*m.x5514 - 0.003*m.x5515 - 0.003*m.x5516 - 0.003*m.x5517 - 0.003*m.x5518 - 0.003*m.x5519
- 0.003*m.x5520 - 0.003*m.x5521 - 0.003*m.x5522 - 0.003*m.x5523 - 0.003*m.x5524 - 0.003*m.x5525
- 0.003*m.x5526 - 0.003*m.x5527 - 0.003*m.x5528 - 0.003*m.x5529 - 0.003*m.x5530 - 0.003*m.x5531
- 0.003*m.x5532 - 0.003*m.x5533 - 0.003*m.x5534 - 0.003*m.x5535 - 0.003*m.x5536 - 0.003*m.x5537
- 0.003*m.x5538 - 0.003*m.x5539 - 0.003*m.x5540 - 0.003*m.x5541 - 0.003*m.x5542 - 0.003*m.x5543
- 0.003*m.x5544 - 0.003*m.x5545 - 0.003*m.x5546 - 0.003*m.x5547 - 0.003*m.x5548 - 0.003*m.x5549
- 0.003*m.x5550 - 0.003*m.x5551 - 0.003*m.x5552 - 0.003*m.x5553 - 0.003*m.x5554 - 0.003*m.x5555
- 0.003*m.x5556 - 0.003*m.x5557 - 0.003*m.x5558 - 0.003*m.x5559 - 0.003*m.x5560 - 0.003*m.x5561
- 0.003*m.x5562 - 0.003*m.x5563 - 0.003*m.x5564 - 0.003*m.x5565 - 0.003*m.x5566 - 0.003*m.x5567
- 0.003*m.x5568 - 0.003*m.x5569 - 0.003*m.x5570 - 0.003*m.x5571 - 0.003*m.x5572 - 0.003*m.x5573
- 0.003*m.x5574 - 0.003*m.x5575 - 0.003*m.x5576 - 0.003*m.x5577 - 0.003*m.x5578 - 0.003*m.x5579
- 0.003*m.x5580 - 0.003*m.x5581 - 0.003*m.x5582 - 0.003*m.x5583 - 0.003*m.x5584 - 0.003*m.x5585
- 0.003*m.x5586 - 0.003*m.x5587 - 0.003*m.x5588 - 0.003*m.x5589 - 0.003*m.x5590 - 0.003*m.x5591
- 0.003*m.x5592 - 0.003*m.x5593 - 0.003*m.x5594 - 0.003*m.x5595 - 0.003*m.x5596 - 0.003*m.x5597
- 0.003*m.x5598 - 0.003*m.x5599 - 0.003*m.x5600 - 0.003*m.x5601 - 0.003*m.x5602 - 0.003*m.x5603
- 0.003*m.x5604 - 0.003*m.x5605 - 0.003*m.x5606 - 0.003*m.x5607 - 0.003*m.x5608 - 0.003*m.x5609
- 0.003*m.x5610 - 0.003*m.x5611 - 0.003*m.x5612 - 0.003*m.x5613 - 0.003*m.x5614 - 0.003*m.x5615
"""
peak, center_x, center_y, radius, focus, width_x, width_y = theta
if 7. < center_x < 14. and 7. < center_y < 14. and 0. < width_x < 0.25 and 0. < width_y < 0.3 and \
peakrange[0] < peak < peakrange[1] and 0.4 < radius < 2. and 0.3 < focus < 2.:
return 0.
else:
return -np.inf
def log_likelihood(theta, x, y, data, var, size):
"""
Logarithm of the likelihood function.
"""
#unpack the parameters
peak, center_x, center_y, radius, focus, width_x, width_y = theta
#1)Generate a model Airy disc
amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)
#2)Apply Focus
f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)
model = signal.convolve2d(adata, focusdata, mode='same')
#3)Apply CCD diffusion, approximated with a Gaussian
CCDdata = np.array([[0.0, width_y, 0.0],
[width_x, (1.-width_y-width_y-width_x-width_x), width_x],
[0.0, width_y, 0.0]])
model = signal.convolve2d(model, CCDdata, mode='same').flatten()
#true for Gaussian errors
#lnL = - 0.5 * np.sum((data - model)**2 / var)
#<NAME>. said that this should be from the model not data so recompute var (now contains rn**2)
var += model.copy()
lnL = - (np.size(var)*np.sum(np.log(var))) - (0.5 * np.sum((data - model)**2 / var))
return lnL
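# A minimal sketch of combining the prior and likelihood above into a
# log-posterior for emcee-style sampling. It assumes the bounds function shown
# above is named `log_prior`; the walker setup (p0, nwalkers, ndim, nsteps) and
# the global `peakrange` used by the prior are assumptions, not part of the
# original code. `var` is copied because log_likelihood adds the model to it
# in place.
def log_posterior(theta, x, y, data, var, size):
    """Log-posterior: log-prior plus log-likelihood (up to a constant)."""
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta, x, y, data, var.copy(), size)
# Hypothetical usage:
# sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior,
#                                 args=(x, y, data, var, size))
# sampler.run_mcmc(p0, nsteps)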
def _printResults(best_params, errors):
"""
Print basic results.
"""
print("=" * 60)
print('Fitting with MCMC:')
pars = ['peak', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y']
print('*'*20 + ' Fitted parameters ' + '*'*20)
for name, value, sig in zip(pars, best_params, errors):
print("{:s} = {:e} +- {:e}" .format(name, value, sig))
print("=" * 60)
def _printFWHM(sigma_x, sigma_y, sigma_xerr, sigma_yerr, req=10.8):
"""
Print results and compare to the requirement at 800nm.
"""
print("=" * 60)
print('FWHM (requirement %.1f microns):' % req)
print(round(np.sqrt(_FWHMGauss(sigma_x)*_FWHMGauss(sigma_y)), 2), ' +/- ',
      round(np.sqrt(_FWHMGauss(sigma_xerr)*_FWHMGauss(sigma_yerr)), 3), ' microns')
print('x:', round(_FWHMGauss(sigma_x), 2), ' +/- ', round(_FWHMGauss(sigma_xerr), 3), ' microns')
print('y:', round(_FWHMGauss(sigma_y), 2), ' +/- ', round(_FWHMGauss(sigma_yerr), 3), ' microns')
print("=" * 60)
def _FWHMGauss(sigma, pixel=12):
"""
Returns the FWHM of a Gaussian with a given sigma.
The returned values is in microns (pixel = 12microns).
"""
return sigma*2*np.sqrt(2*np.log(2))*pixel
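# Worked example: for sigma = 0.4 pixels,
# FWHM = 0.4 * 2*sqrt(2*ln(2)) * 12 um/pixel ~ 0.4 * 2.3548 * 12 ~ 11.3 microns.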
def _ellipticityFromGaussian(sigmax, sigmay):
"""
Ellipticity
"""
return np.abs((sigmax**2 - sigmay**2) / (sigmax**2 + sigmay**2))
def _ellipticityerr(sigmax, sigmay, sigmaxerr, sigmayerr):
"""
Error on ellipticity.
"""
e = _ellipticityFromGaussian(sigmax, sigmay)
err = e * np.sqrt((sigmaxerr/e)**2 + (sigmayerr/e)**2)
return err
def _R2FromGaussian(sigmax, sigmay, pixel=0.1):
"""
R2.
"""
return (sigmax*pixel)**2 + (sigmay*pixel)**2
def _R2err(sigmax, sigmay, sigmaxerr ,sigmayerr):
"""
Error on R2.
"""
err = np.sqrt((2*_R2FromGaussian(sigmax, sigmay))**2*sigmaxerr**2 +
(2*_R2FromGaussian(sigmax, sigmay))**2*sigmayerr**2)
return err
def _plotDifferenceIndividualVsJoined(individuals, joined, title='800nm', sigma=3,
requirementFWHM=10.8, requirementE=0.156, requirementR2=0.002,
truthx=None, truthy=None, FWHMlims=(7.6, 10.3)):
"""
Simple plot
"""
ind = []
for file in g.glob(individuals):
print(file)
ind.append(fileIO.cPicleRead(file))
join = fileIO.cPicleRead(joined)
xtmp = np.arange(len(ind)) + 1
#plot FWHM
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.12, right=0.98)
ax1.set_title(title)
wxind = np.asarray([_FWHMGauss(data['wx']) for data in ind])
wyind = np.asarray([_FWHMGauss(data['wy']) for data in ind])
wxerr = np.asarray([sigma*_FWHMGauss(data['wxerr']) for data in ind])
wyerr = np.asarray([sigma*_FWHMGauss(data['wyerr']) for data in ind])
ax1.errorbar(xtmp, wxind, yerr=wxerr, fmt='o')
ax1.errorbar(xtmp[-1]+1, _FWHMGauss(join['wx']), yerr=sigma*_FWHMGauss(join['wxerr']), fmt='s', c='r')
ax2.errorbar(xtmp, wyind, yerr=wyerr, fmt='o')
ax2.errorbar(xtmp[-1]+1, _FWHMGauss(join['wy']), yerr=sigma*_FWHMGauss(join['wyerr']), fmt='s', c='r')
geommean = np.sqrt(wxind*wyind)
err = np.sqrt(wxerr*wyerr)
ax3.errorbar(xtmp, geommean, yerr=err, fmt='o')
ax3.errorbar(xtmp[-1]+1, np.sqrt(_FWHMGauss(join['wx'])*_FWHMGauss(join['wy'])),
yerr=sigma*np.sqrt(_FWHMGauss(join['wxerr'])*_FWHMGauss(join['wyerr'])), fmt='s', c='r')
#simulations
if truthx is not None:
ax1.axhline(y=_FWHMGauss(truthx), label='Input', c='g')
if truthy is not None:
ax2.axhline(y=_FWHMGauss(truthy), label='Input', c='g')
ax3.axhline(y=np.sqrt(_FWHMGauss(truthx)*_FWHMGauss(truthy)), label='Input', c='g')
#requirements
if requirementFWHM is not None:
ax1.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='--')
ax2.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='--')
ax3.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='-')
plt.sca(ax1)
plt.xticks(visible=False)
plt.sca(ax2)
plt.xticks(visible=False)
plt.sca(ax3)
ltmp = np.hstack((xtmp, xtmp[-1]+1))
plt.xticks(ltmp, ['Individual %i' % x for x in ltmp[:-1]] + ['Joint',], rotation=45)
#ax1.set_ylim(7.1, 10.2)
ax1.set_ylim(*FWHMlims)
ax2.set_ylim(*FWHMlims)
#ax2.set_ylim(8.6, 10.7)
ax3.set_ylim(*FWHMlims)
ax1.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax2.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax3.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax1.set_ylabel(r'FWHM$_{X} \, [\mu$m$]$')
ax2.set_ylabel(r'FWHM$_{Y} \, [\mu$m$]$')
#ax3.set_ylabel(r'FWHM$=\sqrt{FWHM_{X}FWHM_{Y}} \quad [\mu$m$]$')
ax3.set_ylabel(r'FWHM$ \, [\mu$m$]$')
ax1.legend(shadow=True, fancybox=True)
plt.savefig('IndividualVsJoinedFWHM%s.pdf' % title)
plt.close()
#plot R2 and ellipticity
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.12, right=0.98)
ax1.set_title(title)
R2x = [_R2FromGaussian(data['wx'], data['wy'])*1e3 for data in ind]
errR2 = [sigma*1.e3*_R2err(data['wx'], data['wy'], data['wxerr'], data['wyerr']) for data in ind]
ax1.errorbar(xtmp, R2x, yerr=errR2, fmt='o')
ax1.errorbar(xtmp[-1]+1, _R2FromGaussian(join['wx'], join['wy'])*1e3,
yerr=sigma*1.e3*_R2err(join['wx'], join['wy'], join['wxerr'], join['wyerr']), fmt='s')
ell = [_ellipticityFromGaussian(data['wx'], data['wy']) for data in ind]
ellerr = [sigma*_ellipticityerr(data['wx'], data['wy'], data['wxerr'], data['wyerr']) for data in ind]
ax2.errorbar(xtmp, ell, yerr=ellerr, fmt='o')
ax2.errorbar(xtmp[-1]+1, _ellipticityFromGaussian(join['wx'], join['wy']),
yerr=sigma*_ellipticityerr(join['wx'], join['wy'], join['wxerr'], join['wyerr']), fmt='s')
if requirementE is not None:
ax2.axhline(y=requirementE, label='Requirement (800nm)', c='r')
if requirementR2 is not None:
ax1.axhline(y=requirementR2*1e3, label='Requirement (800nm)', c='r')
#simulations
if truthx is not None and truthy is not None:
ax2.axhline(y=_ellipticityFromGaussian(truthx, truthy), label='Input', c='g')
ax1.axhline(y= _R2FromGaussian(truthx, truthy)*1e3, label='Input', c='g')
plt.sca(ax1)
plt.xticks(visible=False)
plt.sca(ax2)
ltmp = np.hstack((xtmp, xtmp[-1]+1))
plt.xticks(ltmp, ['Individual%i' % x for x in ltmp[:-1]] + ['Joint',], rotation=45)
ax1.set_ylim(0.0011*1e3, 0.003*1e3)
ax2.set_ylim(0., 0.23)
ax1.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax2.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax1.set_ylabel(r'$R^{2}$ [mas$^{2}$]')
ax2.set_ylabel('ellipticity')
ax1.legend(shadow=True, fancybox=True)
plt.savefig('IndividualVsJoinedR2e%s.pdf' % title)
plt.close()
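# Hypothetical usage (the glob pattern and pickle names below are placeholders,
# not taken from the original pipeline):
# _plotDifferenceIndividualVsJoined(individuals='results/I800nm?.pkl',
#                                   joined='results/J800nmJoint.pkl',
#                                   title='800nm')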
def _plotModelResiduals(id='simulated800nmJoint1', folder='results/', out='Residual.pdf', individual=False):
"""
Generate a plot with data, model, and residuals.
"""
#data
if individual:
data = pf.getdata(folder+id+'small.fits')
data[data < 1] = 1.
data = np.log10(data)
else:
data = pf.getdata(folder+id+'datafit.fits')
data[data < 1] = 1.
data = np.log10(data)
#model
model = pf.getdata(folder+id+'model.fits')
model[model < 1] = 1.
model = np.log10(model)
#residual
residual = pf.getdata(folder+id+'residual.fits')
#squared residual
residualSQ = pf.getdata(folder+id+'residualSQ.fits')
vmax = np.max((data.max(), model.max()))
#figure
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
ax = [ax1, ax2, ax3, ax4]
fig.subplots_adjust(hspace=0.05, wspace=0.3, top=0.95, bottom=0.02, left=0.02, right=0.9)
ax1.set_title('Data')
ax2.set_title('Model')
ax3.set_title('Residual')
ax4.set_title('$L^{2}$ Residual')
im1 = ax1.imshow(data, interpolation='none', vmax=vmax, origin='lower', vmin=0.1)
im2 = ax2.imshow(model, interpolation='none', vmax=vmax, origin='lower', vmin=0.1)
im3 = ax3.imshow(residual, interpolation='none', origin='lower', vmin=-100, vmax=100)
im4 = ax4.imshow(residualSQ, interpolation='none', origin='lower', vmin=0., vmax=10)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax4)
cax4 = divider.append_axes("right", size="5%", pad=0.05)
cbar1 = plt.colorbar(im1, cax=cax1)
cbar1.set_label(r'$\log_{10}(D_{i, j} \quad [e^{-}]$)')
cbar2 = plt.colorbar(im2, cax=cax2)
cbar2.set_label(r'$\log_{10}(M_{i, j} \quad [e^{-}]$)')
cbar3 = plt.colorbar(im3, cax=cax3)
cbar3.set_label(r'$M_{i, j} - D_{i, j} \quad [e^{-}]$')
cbar4 = plt.colorbar(im4, cax=cax4)
cbar4.set_label(r'$\frac{(M_{i, j} - D_{i, j})^{2}}{\sigma_{CCD}^{2}}$')
for tmp in ax:
plt.sca(tmp)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.savefig(out)
plt.close()
def plotAllResiduals():
"""
Plot residuals of all model fits.
"""
#Joint fits
files = g.glob('results/J*.fits')
individuals = [file for file in files if 'datafit' in file]
for file in individuals:
id = file.replace('results/', '').replace('datafit.fits', '')
print('processing:', id)
_plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id)
#Individual fits
files = g.glob('results/I*.fits')
individuals = [file for file in files if 'model' in file]
for file in individuals:
id = file.replace('results/', '').replace('model.fits', '')
print('processing:', id)
_plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id, individual=True)
def _amplitudeFromPeak(peak, x, y, radius, x_0=10, y_0=10):
"""
This function can be used to estimate an Airy disc amplitude from the peak pixel, centroid and radius.
"""
rz = jn_zeros(1, 1)[0] / np.pi
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / rz)
if r == 0.:
return peak
rt = np.pi * r
z = (2.0 * j1(rt) / rt)**2
amp = peak / z
return amp
def _peakFromTruth(theta, size=21):
"""
Derive the peak value from the parameters used for simulations.
"""
amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
x = np.arange(0, size)
y = np.arange(0, size)
x, y = np.meshgrid(x, y)
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(x, y, amplitude, center_x, center_y, radius)
return adata.max()
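# Note (not part of the original code): _peakFromTruth is approximately the
# inverse of _amplitudeFromPeak; evaluating an AiryDisk2D with
# amplitude = _amplitudeFromPeak(peak, cx, cy, radius) on the same grid
# recovers a maximum close to `peak`, exactly so only when the centroid sits
# on the reference pixel centre (x_0, y_0).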
def _simpleExample(CCDx=10, CCDy=10):
spot = np.zeros((21, 21))
#Create the coordinates x and y
x = np.arange(0, spot.shape[1])
y = np.arange(0, spot.shape[0])
#Put the coordinates in a mesh
xx, yy = np.meshgrid(x, y)
peak, center_x, center_y, radius, focus, width_x, width_y = (200000, 10.1, 9.95, 0.5, 0.5, 0.03, 0.06)
amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=CCDx, y_0=CCDy)
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
foc = signal.convolve2d(adata, focusdata, mode='same')
fileIO.writeFITS(foc, 'TESTfocus.fits', int=False)
CCDdata = np.array([[0.0, width_y, 0.0],
[width_x, (1.-width_y-width_y-width_x-width_x), | |
v))
pass
#
# load polygons face
#
vIndex = 0
model = Character(modelName)
model.setPythonTag('path', pmx_model.path)
model.setPythonTag('version', str(pmx_model.version))
model.setPythonTag('name', modelName)
model.setPythonTag('english_name', pmx_model.english_name)
model.setPythonTag('comment', pmx_model.comment)
model.setPythonTag('english_comment', pmx_model.english_comment)
modelPath = NodePath(model)
modelBody = ModelRoot('Body')
modelBody.setPythonTag('Skins', skins)
bodyPath = NodePath(modelBody)
bodyPath.reparentTo(modelPath)
materials = MaterialCollection()
matIndex = 0
matCount = len(pmx_model.materials)
for mat in pmx_model.materials:
#
# load materials
#
log(u'Loading Material %03d: %s' % (matIndex, mat.name), force=True)
material = Material(mat.name)
material.setDiffuse(VBase4(mat.diffuse_color.r, mat.diffuse_color.g, mat.diffuse_color.b, mat.alpha))
if mat.specular_factor > 0 or (mat.specular_color.r != 1 and mat.specular_color.g != 1 and mat.specular_color.b != 1):
material.setSpecular(VBase4(mat.specular_color.r, mat.specular_color.g, mat.specular_color.b, 1))
material.setShininess(mat.specular_factor*20)
else:
material.setSpecular(VBase4(mat.ambient_color.r, mat.ambient_color.g, mat.ambient_color.b, 0.01))
material.setShininess(0)
material.setAmbient(VBase4(mat.ambient_color.r, mat.ambient_color.g, mat.ambient_color.b, 1))
material.setEmission(VBase4(0, 0, 0, 1))
matflag_twoside = bool(mat.flag & 0b00000001) # double-sided rendering
matflag_shadowfloor = bool(mat.flag & 0b00000010) # ground shadow
matflag_shadowself0 = bool(mat.flag & 0b00000100) # self shadow map
matflag_shadowself1 = bool(mat.flag & 0b00001000) # self shadow
matflag_outline = bool(mat.flag & 0b00010000) # edge/outline enabled
# material.setLocal(False)
material.setLocal(True)
if matflag_twoside:
# double-sided rendering
material.setTwoside(True)
else:
material.setTwoside(False)
if matflag_shadowfloor:
# ground shadow
pass
if matflag_shadowself0:
# self shadow map
pass
if matflag_shadowself1:
# self shadow
pass
if matflag_outline:
# edge/outline enabled
pass
materials.addMaterial(material)
log(u'Loaded Material %03d: %s' % (matIndex, mat.name))
#
# Load vertex for every material/polygon face
#
prim = GeomTriangles(Geom.UHDynamic)
log(u'Loading Polygons %03d: %s' % (matIndex, mat.name), force=True)
for idx in range(vIndex, vIndex+mat.vertex_count, 3):
# flip trig-face for inverted axis-y/axis-z
prim.addVertices(pmx_model.indices[idx+2], pmx_model.indices[idx+1], pmx_model.indices[idx+0])
prim.closePrimitive()
geom = Geom(vdata)
geom.addPrimitive(prim)
node = GeomNode(mat.name)
node.addGeom(geom)
nodePath = NodePath(node)
nodePath.setPythonTag('english_name', mat.english_name)
#
# set polygon face material
#
# Apply the material to this nodePath
tsid = matCount - matIndex
tsid_main = tsid
tsid_sphere = tsid
tsid_toon = tsid
# tsid_main = 1
# tsid_sphere = 2
# tsid_toon = 3
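# Note: tsid = matCount - matIndex decreases as matIndex grows, so earlier
# materials get higher TextureStage sort/priority values, and the same id is
# shared by the main, sphere and toon stages of a single material.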
nodePath.setMaterial(material, tsid_main)
nodePath.setTwoSided(material.getTwoside())
nodePath.setPythonTag('edge_color', mat.edge_color)
nodePath.setPythonTag('edge_size', mat.edge_size)
nodePath.setPythonTag('material_index', matIndex)
nodePath.setPythonTag('material', material)
nodePath.setPythonTag('vIndex', vIndex)
nodePath.setPythonTag('vCount', mat.vertex_count)
nodePath.setPythonTag('pickableObjTag', 1)
if mat.texture_index < 0 and mat.sphere_texture_index < 0 and mat.toon_texture_index < 0:
nodePath.setTransparency(TransparencyAttrib.MDual, matIndex)
else:
if mat.alpha == 1:
nodePath.setTransparency(TransparencyAttrib.MNone, matIndex)
else:
nodePath.setTransparency(TransparencyAttrib.MAlpha, matIndex)
pass
# if mat.alpha<1:
# nodePath.setTransparency(TransparencyAttrib.MAlpha, matIndex)
#
# set polygon face main textures
#
if mat.texture_index >= 0:
# print('Texture %s : Main %03d' % (mat.name, mat.texture_index))
texMain = textures[mat.texture_index]
if texMain and texMain.hasRamImage():
if matflag_outline:
# edge/outline enabled
texMain.setBorderColor(VBase4(mat.edge_color.r, mat.edge_color.g, mat.edge_color.b, mat.edge_color.a))
pass
# texMain.setWrapU(Texture.WMClamp)
ts_main = TextureStage('%3d_%s_main' % (matIndex, mat.name))
ts_main.setColor(VBase4(mat.ambient_color.r, mat.ambient_color.g, mat.ambient_color.b, 1))
ts_main.setSort(tsid_main)
ts_main.setPriority(tsid_main)
if hasAlpha(texMain):
# if nodePath.getTransparency() != TransparencyAttrib.MNone:
# pass
#
# crude workaround for choosing a TransparencyAttrib mode; no better approach has been found yet
#
if not matflag_outline:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif matflag_outline and mat.edge_color.a != 1:
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
elif mat.edge_color.a == 1:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.sphere_texture_index < 0:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
else:
# nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
# print('setting alpha except')
pass
#
# crude workaround for choosing a TransparencyAttrib mode; no better approach has been found yet
#
# print mat.name.lower()[:4]
if mat.alpha == 1:
if matflag_twoside and mat.specular_factor > 100:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif not matflag_twoside and mat.specular_factor > 20:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif matflag_twoside and mat.specular_factor > 20:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif matflag_twoside and mat.specular_factor > 10:
pass
elif matflag_twoside and 2 < mat.specular_factor <= 10:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
pass
elif matflag_twoside and mat.specular_factor >= 5:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif matflag_twoside and mat.specular_factor > 1:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif mat.alpha == 0:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif 0.998 <= mat.alpha < 1:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif mat.sphere_texture_index < 0 and mat.specular_factor > 0:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif mat.sphere_texture_index < 0 and not matflag_twoside and mat.specular_factor > 20:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
else:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
# nodePath.setTransparency(TransparencyAttrib.MBinary, tsid_main)
#
# setting alpha for transparency color mode texture
#
texImage = texMain.getRamImageAs('RGB')
pixel_LT = texImage.getData()[0:3]
# pr,pg,pb = ord(pixel_LT[0]), ord(pixel_LT[1]), ord(pixel_LT[2])
pr,pg,pb = pixel_LT[0], pixel_LT[1], pixel_LT[2]
print('rgb(%d, %d, %d)' % (pr, pg, pb))
if pr == mat.diffuse_color.r*255 and pg == mat.diffuse_color.g*255 and pb == mat.diffuse_color.b*255:
print('--> Left-Top Pixel is Diffuse')
nodePath.setTransparency(TransparencyAttrib.MBinary, tsid_main)
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
pass
elif pr == 0xff and pg == 0xff and pb == 0xff:
print('--> Left-Top Pixel is WHITE')
if(hasAlpha(texMain)):
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
else:
nodePath.setTransparency(TransparencyAttrib.MBinary, tsid_main)
# nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif pr == 0x00 and pg == 0x00 and pb == 0x00:
print('--> Left-Top Pixel is BLACK')
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
else:
if mat.alpha == 1 and not hasAlpha(texMain):
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif mat.edge_color.r == 1 and mat.edge_color.g == 1 and mat.edge_color.b == 1:
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
elif mat.edge_color.r == 0 and mat.edge_color.g == 0 and mat.edge_color.b == 0:
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
# else:
# ts_main.setMode(TextureStage.MReplace)
# nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
#
# Special-case alpha settings for known material names (a crude workaround)
#
if mat.name.lower() in ['hairshadow', 'other', 'body']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.lower()[:4] in ['face']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.lower()[:3] in ['eye']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['肌', '顔', '髪影', 'レース']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('髪') >= 0:
if mat.toon_texture_index >= 0:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif hasAlpha(texMain) and not matflag_outline:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif matflag_outline:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
# else:
# # ts_main.setMode(TextureStage.MModulateGloss)
# nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
elif mat.name in ['スカート']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['瞳']:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif mat.name.find('瞳') >= 0:
# ts_main.setMode(TextureStage.MModulateGloss)
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif mat.name in ['頬']:
ts_main.setMode(TextureStage.MModulateGloss)
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('頬') >= 0:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['白目']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('マーク') >= 0:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('グレイ') >= 0:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif mat.name.find('マーク') >= 0:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('透過') >= 0 and (0 < mat.alpha < 1):
nodePath.setTransparency(TransparencyAttrib.MMultisample, tsid_main)
elif mat.name.find('hair') >= 0:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
elif mat.name in ['服'] or mat.name.find('服') >= 0:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('影') >= 0:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
pass
if mat.name in ['顔鼻', '鼻', '顔', '鼻影']:
ts_main.setMode(TextureStage.MReplace)
pass
if matflag_shadowfloor:
# ground shadow
pass
if mat.alpha >= 0:
nodePath.setTexture(ts_main, texMain, tsid_main)
nodePath.setTexScale(ts_main, 1, -1, -1)
else:
if 0 < mat.alpha < 1:
nodePath.setTransparency(TransparencyAttrib.MAlpha, tsid_main)
pass
else:
if mat.name.lower() in ['hairshadow', 'other']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.lower()[:4] in ['face']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.lower()[:3] in ['eye']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['肌', '顔', '髪影', 'レース']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['スカート', '瞳']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name in ['白目']:
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_main)
elif mat.name.find('髪') >= 0:
nodePath.setTransparency(TransparencyAttrib.MNone, tsid_main)
pass
#
# Set Sphere Texture
#
if mat.sphere_texture_index >= 0:
# print('Texture %s : Sphere %03d' % (mat.name, mat.sphere_texture_index))
if mat.sphere_mode > 0:
texSphere = textures[mat.sphere_texture_index]
if texSphere and texSphere.hasRamImage():
if mat.sphere_mode == 1:
# texMode = TextureStage.MModulateGloss
texMode = TextureStage.MModulate
elif mat.sphere_mode == 2:
texMode = TextureStage.MAdd
elif mat.sphere_mode == 3:
texMode = TextureStage.MReplace
else:
texMode = TextureStage.MModulate
ts_sphere = TextureStage('%3d_%s_sphere' % (matIndex, mat.name))
ts_sphere.setMode(texMode)
ts_sphere.setColor(VBase4(mat.specular_color.r, mat.specular_color.g, mat.specular_color.b, 1))
ts_sphere.setSort(tsid_sphere)
ts_sphere.setPriority(tsid_sphere)
nodePath.setTexGen(ts_sphere, TexGenAttrib.MEyeSphereMap, tsid_sphere)
nodePath.setTexture(ts_sphere, texSphere, tsid_sphere)
nodePath.setTexScale(ts_sphere, 1, -1, -1)
# nodePath.setShaderAuto(matIndex)
if mat.texture_index < 0:
if hasAlpha(texSphere):
nodePath.setTransparency(TransparencyAttrib.MDual, tsid_sphere)
# else:
# nodePath.setTransparency(TransparencyAttrib.MNone, tsid_sphere)
#
# Set Toon Texture
#
if mat.toon_texture_index>=0:
# print('Texture %s : Toon %03d' % (mat.name, mat.toon_texture_index))
if mat.toon_sharing_flag > 0:
texToon = loadTexture(u'toon/toon%02d.bmp' % (mat.toon_texture_index+1))
elif (mat.toon_texture_index < 0) or (not textures[mat.toon_texture_index]):
texToon = Texture('NULL')
else:
texToon = textures[mat.toon_texture_index]
if texToon and texToon.hasRamImage():
# texMode = TextureStage.MDecal
# texMode = TextureStage.MGloss
# texMode = TextureStage.MAdd
texMode = TextureStage.MModulate #Glow
ts_toon = TextureStage('%3d_%s_toon' % (matIndex, mat.name))
ts_toon.setColor(VBase4(0,0,0, .18))
ts_toon.setMode(texMode)
ts_toon.setSort(tsid_toon)
ts_toon.setPriority(tsid_toon)
nodePath.setTexGen(ts_toon, TexGenAttrib.MEyeSphereMap, tsid_toon)
nodePath.setTexture(ts_toon, texToon, tsid_toon)
nodePath.setTexScale(ts_toon, 1, -1, -1)
pass
# nodePath.setBin("unsorted", matIndex)
nodePath.setAntialias(AntialiasAttrib.MAuto)
#
# MNone = 0, MAlpha = 1, MNotused = 2, MMultisample = 3,
# MMultisampleMask = 4, MBinary = 5, MDual = 6
#
# print(nodePath.getTransparency())
print(str(TransparencyAttrib.make(nodePath.getTransparency())).strip())
vIndex += mat.vertex_count
# modelBody.addChild(node)
nodePath.reparentTo(bodyPath)
log(u'Loaded Polygons %03d: %s' % (matIndex, mat.name))
matIndex += 1
# modelPath = NodePath(model)
# modelPath.setShaderAuto()
return(modelPath)
pass
def loadPmxBone(pmx_model):
def GetParentNode(root, parent_index):
node = None
if parent_index == -1:
node = root
pass
else:
for child in root.getChildren():
node = GetParentNode(child, parent_index)
if node:
break
else:
boneIndex = child.getPythonTag('boneIndex')
if boneIndex == parent_index:
node = child
break
pass
return(node)
pass
#
# Load Bone outline for display
#
data = EggData()
data.read('stages/bone.egg')
# data.read('stages/bone_oct.egg')
# data.read('stages/bone_cone.egg')
dnp = NodePath(loadEggData(data))
dnp.setColor(LVector4f(1,1,0,1))
boneOutline = dnp.node().getChild(0)
min_point = LPoint3f()
max_point = LPoint3f()
dnp.calcTightBounds(min_point, max_point)
bone_size = LPoint3f(max_point.x-min_point.x, max_point.y-min_point.y, max_point.z-min_point.z)
#
# Load Bone data
#
formatArray = GeomVertexArrayFormat()
formatArray.addColumn(InternalName.make(str("vindex")), 1, Geom.NTUint32, Geom.CIndex)
formatArray.addColumn(InternalName.make(str("tindex")), 1, Geom.NTFloat32, Geom.COther)
formatArray.addColumn(InternalName.make(str("pindex")), 1, Geom.NTFloat32, | |
import typing
from .._block_utils import _load_btype, BlockParam, _load_btypes
from ..actions import EntityAction
from ..ifs import IfEntity
from ...classes import Arguments, Tag, DFNumber
from ...enums import EntityTarget, EntityActionType, IfEntityType, \
BlockType, Hand, EffectParticleMode, HorseVariant, HorseColor, MooshroomVariant, \
EntityAnimation, CatType, EntityColor, FoxType, PandaGene, ParrotVariant, ArmorStandPart, RabbitType, \
TropicalFishPattern, VillagerProfession, VillagerBiome
from ...typings import Textable, Numeric, Locatable, ItemParam, Potionable, ParticleParam, p_check, SpawnEggable, \
p_bool_check, Listable
from ...utils import remove_u200b_from_doc
__all__ = ("Entity",)
class Entity:
"""Represents a DiamondFire Entity. Used for Entity Action and If Entity humanized methods.
Parameters
----------\u200b
target : Optional[:class:`~.EntityTarget`], optional
The target that this instance represents (Default Entity, Last Mob, Victim etc.) or ``None`` for empty target
(equivalent to leaving the target line empty on DF - becomes the current selection, or the Default Entity).
Defaults to ``None``.
Attributes
----------\u200b
target : Optional[:class:`~.EntityTarget`]
The target that this instance represents (Default Entity, Last Mob, Victim etc.) or ``None`` for empty target
(equivalent to leaving the target line empty on DF - becomes the current selection, or the Default Entity).
"""
__slots__ = ("target",)
target: typing.Optional[EntityTarget]
def __init__(self, target: typing.Optional[EntityTarget]):
self.target: typing.Optional[EntityTarget] = EntityTarget(target) if target else None
def _digest_target(self, target: typing.Optional[EntityTarget]) -> typing.Optional[EntityTarget]:
"""Checks a given entity target for validity.
Parameters
----------
target : Optional[:class:`~.EntityTarget`]
The target to check.
Returns
-------
Optional[:class:`~.EntityTarget`]
Returns the given target as a valid EntityTarget, or ``None``.
"""
return EntityTarget(target) if target else None
# region:entityactions
def set_armor_stand_tags(
self,
*, is_visible: typing.Optional[bool] = None, is_marker: typing.Optional[bool] = None,
allow_item_taking_or_adding: typing.Optional[bool] = None,
has_physics_or_updates: typing.Optional[bool] = None, is_small: typing.Optional[bool] = None,
has_arms: typing.Optional[bool] = None, has_base_plate: typing.Optional[bool] = None,
target: typing.Optional[EntityTarget] = None
):
"""Changes the settings of an armor stand, such as visibility.
.. rank:: Mythic
Parameters
----------
is_visible : Optional[:class:`bool`], optional
Whether this Armor Stand is visible.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
is_marker : Optional[:class:`bool`], optional
Whether this Armor Stand is a marker (has no hitbox).
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
allow_item_taking_or_adding : Optional[:class:`bool`], optional
Whether this armor stand should have item taking/adding allowed.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
has_physics_or_updates : Optional[:class:`bool`], optional
Whether this Armor Stand is affected by physics and entity updates.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
is_small : Optional[:class:`bool`], optional
Whether this Armor Stand is small.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
has_arms : Optional[:class:`bool`], optional
Whether this Armor Stand has arms.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
has_base_plate : Optional[:class:`bool`], optional
Whether this Armor Stand has a base plate.
Specify ``True`` or ``False`` to change this setting, or ``None`` to leave it untouched. Defaults to
``None``.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or ``None`` for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_entity.set_armor_stand_tags(is_visible=True, has_physics_or_updates=False) # selects last spawned entity
# OR
Entity(EntityTarget.LAST_ENTITY).set_armor_stand_tags(is_visible=True, has_physics_or_updates=False)
# Makes the armor stand visible and makes it not affected by physics or updates; other params unchanged
"""
args = Arguments([], tags=[ # Set to True, Set to False, Don't Change
Tag(
"Is Visible",
option="Set to True" if is_visible else (
"Set to False" if is_visible is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Is Marker (No Hitbox)",
option="Set to True" if is_marker else (
"Set to False" if is_marker is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Allow Item Taking / Adding",
option="Set to True" if allow_item_taking_or_adding else (
"Set to False" if allow_item_taking_or_adding is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Has Physics / Updates",
option="Set to True" if has_physics_or_updates else (
"Set to False" if has_physics_or_updates is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Is Small",
option="Set to True" if is_small else (
"Set to False" if is_small is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Has Arms",
option="Set to True" if has_arms else (
"Set to False" if has_arms is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
),
Tag(
"Has Base Plate",
option="Set to True" if has_base_plate else (
"Set to False" if has_base_plate is not None else "Don't Change"
), # default is Don't Change
action=EntityActionType.ARMOR_STAND_TAGS, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.ARMOR_STAND_TAGS,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def disguise_as_block(
self, block_type: BlockParam, name: typing.Optional[Textable] = None,
*, target: typing.Optional[EntityTarget] = None
):
"""Disguises the entity as a block.
.. rank:: Overlord
Parameters
----------
block_type : Union[:class:`Material`, :attr:`~.ItemParam`, :attr:`~.Textable`]
The type of Block disguise.
The type can be specified either as:
- an instance of :class:`~.Material` (the material of the block to set);
- an item (:attr:`~.ItemParam` - the item representing the block to set);
- text (:attr:`~.Textable` - the material of the block to set as text).
name : Optional[:attr:`~.Textable`], optional
Name of disguise. Default is ``None`` (no special name).
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
block_type = Material.GRASS_BLOCK # disguise as a grass block
last_entity.disguise_as_block(block_type, "Some Block")
# OR
Entity(EntityTarget.LAST_ENTITY).disguise_as_block(block_type, "Some Block")
# last spawned entity is disguised as a grass block named "Some Block"
"""
args = Arguments([
p_check(block_type, typing.Union[ItemParam, Textable], "block_type"),
p_check(name, Textable, "name") if name is not None else None
])
return EntityAction(
action=EntityActionType.BLOCK_DISGUISE,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_creeper_charged(
self, is_charged: bool = True,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets whether a creeper has the charged effect.
Parameters
----------
is_charged : :class:`bool`, optional
Whether or not the target creeper should be charged. Defaults to ``True`` (should be charged).
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_entity.creeper_charged(True)
# OR
Entity(EntityTarget.LAST_ENTITY).creeper_charged(True)
# Creeper is now charged; False for not charged.
"""
args = Arguments([], tags=[
Tag(
"Is Charged", option=bool(is_charged), # default is True
action=EntityActionType.CREEPER_CHARGED, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.CREEPER_CHARGED,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_creeper_ignited(
self, is_ignited: bool = True,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets whether a creeper is currently ignited. (getting ready to explode)
Parameters
----------
is_ignited : :class:`bool`, optional
Whether or not the Creeper is ignited. Defaults to ``True`` (is ignited).
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_creeper_ignited(True) # works if last mob spawned is a creeper
# OR
Entity(EntityTarget.LAST_MOB).set_creeper_ignited(True) # creeper is now ignited; False for not ignited
"""
args = Arguments([], tags=[
Tag(
"Is Ignited", option=bool(is_ignited), # default is True
action=EntityActionType.CREEPER_IGNITED, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.CREEPER_IGNITED,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_creeper_max_fuse(
self, ticks: Numeric,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets the starting amount of fuse ticks of a creeper.
Parameters
----------
ticks : :attr:`~.Numeric`
Fuse ticks.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_creeper_max_fuse(40) # if last mob spawned is a creeper, this will work
# OR
Entity(EntityTarget.LAST_MOB).set_creeper_max_fuse(40) # fuse is now 2 seconds
"""
args = Arguments([
p_check(ticks, Numeric, "ticks")
])
return EntityAction(
action=EntityActionType.CREEPER_MAX_FUSE,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_creeper_radius(
self, radius: Numeric,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets the explosion radius of a creeper.
Parameters
----------
radius : :attr:`~.Numeric`
The new explosion radius.
.. note::
The maximum radius is 25.
target : | |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchtext.legacy.datasets import Multi30k
from torchtext.legacy.data import Field, BucketIterator
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import spacy
import numpy as np
import random
import math
import time
"""
About: the model is made of encoder and decoder
- Encoder encodes the input sequence, in the source language, into a context vector
- Decoder decodes the context vector to produce the output sentence in the target language
ENCODER:
- The previous models had encoder that compresses an entire input sentence into a single context vector
- the CS2S is different - it gets 2 context vector for each token in an input sentence
- 2 context vectors per token are conved vector and combined vector.
- The token is first pass thru the token embedding layer and the positional embedding layer
+ Positional embedding layer = elementwise summed together to get a vector which contains info about the token and also its position with in the sequence
- The result is followed by a linear layer which transforms the embedding vector into a vector with the required hidden dim size
- Then we pass the hidden vector into N convolutional blocks
- The vector then fed thru another linear layer to transform it back to the hidden dim size into the embedding dim size => this is the conved vector
- The conved vector is element wise summed with embedding vector via residual connection => this provide the combined vector for each token
CONVOLUTION-ENCODER:
- We will have 10 conv block with 1024 filters in each block
- The input sentence is padded because the convolutional layers will reduce the length of the input sentence and we want the length of the sentence coming into the conv block to be equal to the length of it coming out of the convolution.
- The filter is designed so that the output hidden dim of the filter is twice the input hidden dim
- We have to double the size of the hidden dim leaving the conv layer since the GLU - gated linear units have gating mechanism (similar to GRU and LSTM) contained with activation function and actually half the size of the hidden dim
- The result from GLU is now element wise summed with its own vector before it was passed thru the conv layer
IMPLEMENTATION:
- To make the implementation sime, we only allow for odd sized kernel, this allows padding to be added equally to both sides of the source sequence
- the positional embedding is initilaied to have a vocab of 100. This means it can handle sequences up to 100 elements long
DECODER:
- Takes in the actual target sentence and tries to predict it.
- This model differes from the RNN as it predicts all tokens within the target sentencein parallel
- First, the embeddings do not have a residual connection that connects after the convolutional blocks and the transformation. Instead the embeddings are fed into the convolutional blocks to be used as residual connections there.
- Second, to feed the decoder information from the encoder, the encoder conved and combined outputs are used - again, within the convolutional blocks.
- Finally, the output of the decoder is a linear layer from embedding dimension to output dimension. This is used make a prediction about what the next word in the translation should be.
CONVOLUTION-DECODER:
IMPLEMENTATION:
- As we only pad on one side the decoder is allowed to use both odd and even sized padding. Again, the scale is used to reduce variance throughout the model and the position embedding is initialized to have a "vocabulary" of 100.
- This model takes in the encoder representations in its forward method and both are passed to the calculate_attention method which calculates and applies attention. It also returns the actual attention values, but we are not currently using them.
"""
class Encoder(nn.Module):
def __init__(
self,
input_dim,
emb_dim,
hid_dim,
n_layers,
kernel_size,
dropout,
device,
max_length=100,
):
super().__init__()
assert kernel_size % 2 == 1, "Kernel size must be odd!"
self.device = device
self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device)
self.tok_embedding = nn.Embedding(input_dim, emb_dim)
self.pos_embedding = nn.Embedding(max_length, emb_dim)
self.emb2hid = nn.Linear(emb_dim, hid_dim)
self.hid2emb = nn.Linear(hid_dim, emb_dim)
self.convs = nn.ModuleList(
[
nn.Conv1d(
in_channels=hid_dim,
out_channels=2 * hid_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
)
for _ in range(n_layers)
]
)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src = [batch size, src len]
batch_size = src.shape[0]
src_len = src.shape[1]
# create position tensor
pos = (
torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
)
# pos = [0, 1, 2, 3, ..., src len - 1]
# pos = [batch size, src len]
# embed tokens and positions
tok_embedded = self.tok_embedding(src)
pos_embedded = self.pos_embedding(pos)
# tok_embedded = pos_embedded = [batch size, src len, emb dim]
# combine embeddings by elementwise summing
embedded = self.dropout(tok_embedded + pos_embedded)
# embedded = [batch size, src len, emb dim]
# pass embedded through linear layer to convert from emb dim to hid dim
conv_input = self.emb2hid(embedded)
# conv_input = [batch size, src len, hid dim]
# permute for convolutional layer
conv_input = conv_input.permute(0, 2, 1)
# conv_input = [batch size, hid dim, src len]
# begin convolutional blocks...
for i, conv in enumerate(self.convs):
# pass through convolutional layer
conved = conv(self.dropout(conv_input))
# conved = [batch size, 2 * hid dim, src len]
# pass through GLU activation function
conved = F.glu(conved, dim=1)
# conved = [batch size, hid dim, src len]
# apply residual connection
conved = (conved + conv_input) * self.scale
# conved = [batch size, hid dim, src len]
# set conv_input to conved for next loop iteration
conv_input = conved
# ...end convolutional blocks
# permute and convert back to emb dim
conved = self.hid2emb(conved.permute(0, 2, 1))
# conved = [batch size, src len, emb dim]
# elementwise sum output (conved) and input (embedded) to be used for attention
combined = (conved + embedded) * self.scale
# combined = [batch size, src len, emb dim]
return conved, combined
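# A minimal shape check for the Encoder above (the hyperparameters and vocab
# size are illustrative assumptions, not the original training configuration):
#   device = torch.device('cpu')
#   enc = Encoder(input_dim=8000, emb_dim=256, hid_dim=512, n_layers=10,
#                 kernel_size=3, dropout=0.25, device=device)
#   src = torch.randint(0, 8000, (64, 30))     # [batch size, src len]
#   conved, combined = enc(src)
#   # conved.shape == combined.shape == torch.Size([64, 30, 256])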
class Decoder(nn.Module):
def __init__(
self,
output_dim,
emb_dim,
hid_dim,
n_layers,
kernel_size,
dropout,
trg_pad_idx,
device,
max_length=100,
):
super().__init__()
self.kernel_size = kernel_size
self.trg_pad_idx = trg_pad_idx
self.device = device
self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device)
self.tok_embedding = nn.Embedding(output_dim, emb_dim)
self.pos_embedding = nn.Embedding(max_length, emb_dim)
self.emb2hid = nn.Linear(emb_dim, hid_dim)
self.hid2emb = nn.Linear(hid_dim, emb_dim)
self.attn_hid2emb = nn.Linear(hid_dim, emb_dim)
self.attn_emb2hid = nn.Linear(emb_dim, hid_dim)
self.fc_out = nn.Linear(emb_dim, output_dim)
self.convs = nn.ModuleList(
[
nn.Conv1d(
in_channels=hid_dim,
out_channels=2 * hid_dim,
kernel_size=kernel_size,
)
for _ in range(n_layers)
]
)
self.dropout = nn.Dropout(dropout)
def calculate_attention(self, embedded, conved, encoder_conved, encoder_combined):
# embedded = [batch size, trg len, emb dim]
# conved = [batch size, hid dim, trg len]
# encoder_conved = encoder_combined = [batch size, src len, emb dim]
# permute and convert back to emb dim
conved_emb = self.attn_hid2emb(conved.permute(0, 2, 1))
# conved_emb = [batch size, trg len, emb dim]
combined = (conved_emb + embedded) * self.scale
# combined = [batch size, trg len, emb dim]
energy = torch.matmul(combined, encoder_conved.permute(0, 2, 1))
# energy = [batch size, trg len, src len]
attention = F.softmax(energy, dim=2)
# attention = [batch size, trg len, src len]
attended_encoding = torch.matmul(attention, encoder_combined)
# attended_encoding = [batch size, trg len, emd dim]
# convert from emb dim -> hid dim
attended_encoding = self.attn_emb2hid(attended_encoding)
# attended_encoding = [batch size, trg len, hid dim]
# apply residual connection
attended_combined = (conved + attended_encoding.permute(0, 2, 1)) * self.scale
# attended_combined = [batch size, hid dim, trg len]
return attention, attended_combined
def forward(self, trg, encoder_conved, encoder_combined):
# trg = [batch size, trg len]
# encoder_conved = encoder_combined = [batch size, src len, emb dim]
batch_size = trg.shape[0]
trg_len = trg.shape[1]
# create position tensor
pos = (
torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
)
# pos = [batch size, trg len]
# embed tokens and positions
tok_embedded = self.tok_embedding(trg)
pos_embedded = self.pos_embedding(pos)
# tok_embedded = [batch size, trg len, emb dim]
# pos_embedded = [batch size, trg len, emb dim]
# combine embeddings by elementwise summing
embedded = self.dropout(tok_embedded + pos_embedded)
| |
**MaintenanceTrackName** *(string) --*
The name of the maintenance track that the cluster will change to during the next maintenance window.
- **EncryptionType** *(string) --*
The encryption type for a cluster. Possible values are: KMS and None. For the China region the possible values are None, and Legacy.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **AllowVersionUpgrade** *(boolean) --*
A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
- **NumberOfNodes** *(integer) --*
The number of compute nodes in the cluster.
- **PubliclyAccessible** *(boolean) --*
A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network.
- **Encrypted** *(boolean) --*
A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest.
- **RestoreStatus** *(dict) --*
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
- **Status** *(string) --*
The status of the restore action. Returns starting, restoring, completed, or failed.
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.
- **SnapshotSizeInMegaBytes** *(integer) --*
The size of the set of snapshot data used to restore the cluster.
- **ProgressInMegaBytes** *(integer) --*
The number of megabytes that have been transferred from snapshot storage.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.
- **DataTransferProgress** *(dict) --*
- **Status** *(string) --*
Describes the status of the cluster. While the transfer is in progress the status is ``transferringdata`` .
- **CurrentRateInMegaBytesPerSecond** *(float) --*
Describes the data transfer rate in megabytes per second.
- **TotalDataInMegaBytes** *(integer) --*
Describes the total amount of data to be transferred in megabytes.
- **DataTransferredInMegaBytes** *(integer) --*
Describes the total amount of data that has been transferred, in megabytes.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
Describes the estimated number of seconds remaining to complete the transfer.
- **ElapsedTimeInSeconds** *(integer) --*
Describes the number of seconds that have elapsed during the data transfer.
- **HsmStatus** *(dict) --*
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
- **HsmClientCertificateIdentifier** *(string) --*
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
- **HsmConfigurationIdentifier** *(string) --*
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
- **Status** *(string) --*
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
- **ClusterSnapshotCopyStatus** *(dict) --*
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
- **DestinationRegion** *(string) --*
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
- **RetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **SnapshotCopyGrantName** *(string) --*
The name of the snapshot copy grant.
- **ClusterPublicKey** *(string) --*
The public key for the cluster.
- **ClusterNodes** *(list) --*
The nodes in the cluster.
- *(dict) --*
The identifier of a node in a cluster.
- **NodeRole** *(string) --*
Whether the node is a leader node or a compute node.
- **PrivateIPAddress** *(string) --*
The private IP address of a node within a cluster.
- **PublicIPAddress** *(string) --*
The public IP address of a node within a cluster.
- **ElasticIpStatus** *(dict) --*
The status of the elastic IP (EIP) address.
- **ElasticIp** *(string) --*
The elastic IP (EIP) address for the cluster.
- **Status** *(string) --*
The status of the elastic IP (EIP) address.
- **ClusterRevisionNumber** *(string) --*
The specific revision number of the database in the cluster.
- **Tags** *(list) --*
The list of tags for the cluster.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **IamRoles** *(list) --*
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
- *(dict) --*
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
- **IamRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` .
- **ApplyStatus** *(string) --*
A value that describes the status of the IAM role's association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
* ``in-sync`` : The role is available for use by the cluster.
* ``adding`` : The role is in the process of being associated with the cluster.
* ``removing`` : The role is in the process of being disassociated from the cluster.
- **PendingActions** *(list) --*
Cluster operations that are waiting to be started.
- *(string) --*
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the cluster.
- **ElasticResizeNumberOfNodeOptions** *(string) --*
The number of nodes that you can resize the cluster to with the elastic resize method.
- **DeferredMaintenanceWindows** *(list) --*
Describes a group of ``DeferredMaintenanceWindow`` objects.
- *(dict) --*
Describes a deferred maintenance window.
- **DeferMaintenanceIdentifier** *(string) --*
A unique identifier for the maintenance window.
- **DeferMaintenanceStartTime** *(datetime) --*
A timestamp for the beginning of the time period when we defer maintenance.
- **DeferMaintenanceEndTime** *(datetime) --*
A timestamp for the end of the time period when we defer maintenance.
- **SnapshotScheduleIdentifier** *(string) --*
A unique identifier for the cluster snapshot schedule.
- **SnapshotScheduleState** *(string) --*
The current state of the cluster snapshot schedule.
- **ResizeInfo** *(dict) --*
Returns the following:
* AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
* ResizeType: Returns ClassicResize
- **ResizeType** *(string) --*
Returns the value ``ClassicResize`` .
- **AllowCancelResize** *(boolean) --*
A boolean value indicating if the | |
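A minimal sketch of reading a few of the fields above with boto3 (hedged: it assumes this is the ``Cluster`` structure returned by ``describe_clusters``, and ``'my-cluster'`` is a placeholder identifier):
import boto3
redshift = boto3.client('redshift')
cluster = redshift.describe_clusters(ClusterIdentifier='my-cluster')['Clusters'][0]
print(cluster['ClusterVersion'], cluster['NumberOfNodes'], cluster['PubliclyAccessible'])
restore = cluster.get('RestoreStatus', {})   # absent if the cluster was not restored from a snapshot
print(restore.get('Status'), restore.get('ProgressInMegaBytes'))
for role in cluster.get('IamRoles', []):     # IAM roles attached to the cluster
    print(role['IamRoleArn'], role['ApplyStatus'])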
<reponame>hu120051/cybercafe_management
from flask import Flask
from flask import render_template
from flask import request
import pymysql
import datetime
app = Flask(__name__)
app.config['SECRET_KEY'] = '123456'
@app.route('/')  # home page
def index():
return render_template('index.html')
# ######## Admin side ######## #
@app.route('/adminlogin/', methods=['GET', 'POST'])  # admin login page
def adminlogin():
if request.method == 'GET':
return render_template('adminlogin.html')
else:
username = request.form['username']
password = request.form['password']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur1 = db.cursor()
sql_select01 = '''SELECT Staff_ID FROM Staff WHERE Staff_ID=('%s') AND Password=('%s') ''' % (username, password)
cur1.execute(sql_select01)
return1 = cur1.fetchall()
if len(return1):  # not very familiar with JS and Flask, so no better approach was found: every time an admin enters the system, all tables are queried again
cur1.execute('''select ca.Card_ID, cu.CName, ca.Password, ca.Using_Status, ca.Checkout_Status,
ca.Account_Balance FROM Card ca, Customer cu WHERE ca.C_ID = cu.C_ID ORDER BY ca.Card_ID asc''')
d = cur1.fetchall()
cur1.execute('''select o.Order_ID, o.Card_ID, o.Order_Time, s.SName, o.Quantity, o.Amount, o.Order_Status
FROM Order_T o, Snacks s WHERE o.S_ID = s.S_ID ORDER BY o.Order_ID desc''')
c = cur1.fetchall()
cur1.execute('''select * FROM Bill ORDER BY B_ID desc''')
b = cur1.fetchall()
cur1.execute('''select * FROM Computer ORDER BY PC_ID asc''')
a = cur1.fetchall()
cur1.execute('''select * FROM Snacks ''')
e = cur1.fetchall()
users = []
orders = []
bills = []
computers = []
snacks = []
# load all user info into users
for value in d:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
data['e'] = value[4]
data['f'] = value[5]
users.append(data)
# load all snack order info into orders
for value in c:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
data['e'] = value[4]
data['f'] = value[5]
data['g'] = value[6]
orders.append(data)
# load all computer-use bill info into bills
for value in b:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
data['e'] = value[4]
data['f'] = value[5]
data['g'] = value[6]
bills.append(data)
# load all computer info into computers
for value in a:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
computers.append(data)
# load all snack info into snacks
for value in e:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
snacks.append(data)
return render_template('adminmain.html', users=users, orders=orders, bills=bills, computers=computers, snacks=snacks)
else:
return render_template("adminlogin.html", tips='用户名或密码错误')
# flash('username or password is wrong')
# return render_template("adminlogin.html")
@app.route('/addusers/', methods=['GET', 'POST'])  # add a new user to the system
def addusers():
if request.method == 'GET':
return render_template('addusers.html')
else:
c_id = request.form['CustomerID']
cname = request.form['Cname']
age = request.form['Age']
gender = request.form['Gender']
card_id = request.form['Card_ID']
password = u'<PASSWORD>'  # the initial password is always <PASSWORD>
account_balance = request.form['Account_Balance']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''INSERT INTO Customer VALUES ('%s', '%s', '%s', '%s')''' % (c_id, cname, age, gender)
sql_insert2 = '''INSERT INTO Card VALUES ('%s','%s','Free','Paid','%s','%s')''' \
% (card_id, password, account_balance, c_id)
cur.execute(sql_insert1)
cur.execute(sql_insert2)
db.commit()
db.close()
return render_template('close.html')
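# Added side note (hedged, not part of the original app): the string-formatted SQL above is open to
# SQL injection. pymysql supports parameterized queries via execute(sql, params); a sketch of the
# same two inserts with placeholders (same column order as the original statements) would be:
def addusers_parameterized(db, c_id, cname, age, gender, card_id, password, account_balance):
    cur = db.cursor()
    cur.execute('INSERT INTO Customer VALUES (%s, %s, %s, %s)', (c_id, cname, age, gender))
    cur.execute('INSERT INTO Card VALUES (%s, %s, %s, %s, %s, %s)',
                (card_id, password, 'Free', 'Paid', account_balance, c_id))
    db.commit()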
@app.route('/changecharge/', methods=['GET', 'POST'])  # top up / refund
def changecharge():
if request.method == 'GET':
return render_template('changecharge.html')
else:
card_id = request.form['CardID']
change = int(request.form['Change'])
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''UPDATE Card SET Account_Balance = (Account_Balance+(%d)) WHERE Card_ID= '%s' ''' % (change, card_id)
cur.execute(sql_insert1)
print(sql_insert1)
db.commit()
db.close()
return render_template('close.html')
@app.route('/updateusers/', methods=['GET', 'POST'])  # update a user's info in the system
def updateusers():
if request.method == 'GET':
return render_template('updateusers.html')
else:
card_id = request.form['CardID']
cname = request.form['Cname']
age = request.form['Age']
gender = request.form['Gender']
password = request.form['Password']
using_status = request.form['Using_Status']
checkout_status = request.form['Checkout_Status']
account_balance = request.form['Account_Balance']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''UPDATE Customer SET Cname='%s', Age='%s', Gender='%s' WHERE (C_ID=(SELECT C_ID FROM Card
WHERE Card_ID='%s')) ''' % (cname, age, gender, card_id)
sql_insert2 = '''UPDATE Card SET Password='%s', Using_Status='%s', Checkout_Status='%s', Account_Balance='%s'
WHERE (Card_ID='%s') ''' % (password, using_status, checkout_status, account_balance, card_id)
cur.execute(sql_insert1)
cur.execute(sql_insert2)
db.commit()
db.close()
return render_template('close.html')
@app.route('/deleteuser/<id>', methods=['GET', 'POST'])  # delete a user from the system
def deleteuser(id):
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_select1 = '''SELECT C_ID FROM Card WHERE Card_ID='%s' ''' % id
cur.execute(sql_select1)
c = cur.fetchone()
cid = c[0]
sql_delete1 = '''DELETE FROM Customer WHERE C_ID='%s' ''' % cid
sql_delete2 = '''DELETE FROM Card WHERE Card_ID = '%s' ''' % id
# C_ID in the Card table is a foreign key referencing Customer, so the row holding the foreign key must be deleted first
cur.execute(sql_delete2)
cur.execute(sql_delete1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/finishorder/<id>', methods=['GET', 'POST'])  # finish a snack order (no charge on completion; it is settled together when the bill is paid)
def finishorder(id):
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''UPDATE Order_T SET Order_Status = 'Finished' WHERE Order_ID='%s' ''' % id
print(sql_insert1)
cur.execute(sql_insert1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/deleteorder/<id>', methods=['GET', 'POST'])  # delete a snack order
def deleteorder(id):
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''delete FROM Order_T WHERE Order_ID='%s' ''' % id
print(sql_insert1)
cur.execute(sql_insert1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/addbills/', methods=['GET', 'POST'])  # add a computer-use bill
def addbills():
if request.method == 'GET':
return render_template('addbills.html')
else:
starttime = request.form['Start_Time']
card_id = request.form['Card_ID']
pc_id = request.form['PC_ID']
staff_id = request.form['Staff_ID']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
cur.execute('''SELECT MAX(B_ID) FROM Bill''')
num = cur.fetchone()
bill_id = num[0] + 1  # bill_id is generated sequentially
sql_insert1 = '''INSERT INTO Bill VALUES ('%s', '%s', null, null, '%s', '%s' ,'%s')''' \
% (bill_id, starttime, card_id, pc_id, staff_id)
sql_update1 = '''UPDATE Card SET Using_Status='Using', Checkout_Status='Unpaid' WHERE (Card_ID='%s')''' \
% card_id
sql_update2 = '''UPDATE Computer SET Card_ID='%s' WHERE (PC_ID='%s')''' % (card_id, pc_id)
'''
insert1: add the new bill to the Bill table; the end time and total amount are filled in by the admin at checkout
update1: in the Card table, set this card's usage status to "Using" and its checkout status to "Unpaid"
update2: in the Computer table, set this computer's current card to this card
'''
cur.execute(sql_insert1)
cur.execute(sql_update1)
cur.execute(sql_update2)
db.commit()
db.close()
return render_template('close.html')
@app.route('/finishbills/', methods=['GET', 'POST'])  # settle a bill
def finishbills():
if request.method == 'GET':
return render_template('finishbills.html')
else:
bill_id = request.form['Bill_ID']
endtime = request.form['End_Time']
total_amount = request.form['Total_Amount']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='hhy123', db='cybercafe')
cur = db.cursor()
'''
Settling a bill touches three tables:
1. Card: set the card back to Free, mark it Paid, and deduct the amount from the balance
2. Bill: fill in the end (log-off) time and the total amount
3. Computer: set the in-use computer's Card_ID back to null, i.e. the computer becomes idle
'''
sql_update1 = '''UPDATE Card SET Using_Status='Free', Checkout_Status='Paid', Account_Balance=
(Account_Balance-%s) WHERE Card_ID=(SELECT Card_ID FROM Bill WHERE B_ID = '%s')''' % (total_amount, bill_id)
sql_update2 = '''UPDATE Bill SET End_Time='%s', Total_Amount='%s' WHERE (B_ID='%s')''' \
% (endtime, total_amount, bill_id)
sql_update3 = '''UPDATE Computer SET Card_ID=null WHERE Card_ID=
(SELECT Card_ID FROM Bill WHERE B_ID = '%s')''' % bill_id
cur.execute(sql_update1)
cur.execute(sql_update2)
cur.execute(sql_update3)
db.commit()
db.close()
return render_template('close.html')
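# Added helper sketch (hedged, not part of the original app): Total_Amount is currently typed in by
# the admin. It could instead be derived from the stored times and Computer.Price_Per_Hour; the time
# format below is an assumption, since the app never states how Start_Time/End_Time are formatted:
def compute_total_amount(start_time_str, end_time_str, price_per_hour, fmt='%Y-%m-%d %H:%M:%S'):
    start = datetime.datetime.strptime(start_time_str, fmt)
    end = datetime.datetime.strptime(end_time_str, fmt)
    hours = (end - start).total_seconds() / 3600.0
    return round(hours * float(price_per_hour), 2)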
@app.route('/addcomputer/', methods=['GET', 'POST'])  # add a computer
def addcomputer():
if request.method == 'GET':
return render_template('addcomputer.html')
else:
computer_id = request.form['Computer_ID']
price = request.form['Price_Per_Hour']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''INSERT INTO Computer VALUES ('%s', '%s', null)''' % (computer_id, price)
cur.execute(sql_insert1)
db.commit()
db.close()
return render_template('close.html')
@app.route('/changeadpwd/', methods=['GET', 'POST'])  # change the admin password
def changeadpwd():
if request.method == 'GET':
return render_template('adminmain.html')
else:
staff_id = request.form['Staff_ID']
opwd = request.form['Old_Pwd']
npwd = request.form['New_Pwd']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_update1 = '''UPDATE Staff SET Password='%s' WHERE Staff_ID='%s' AND Password='%s' ''' % (npwd, staff_id, opwd)
print(sql_update1)
cur.execute(sql_update1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/deletesnack/<id>', methods=['GET', 'POST'])  # delete a snack
def deletesnack(id):
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_delete1 = '''delete FROM Snacks WHERE S_ID='%s' ''' % id
cur.execute(sql_delete1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/changesnack/', methods=['GET', 'POST'])  # modify snack info
def changesnack():
if request.method == 'GET':
return render_template('adminmain.html')
else:
s_id = request.form['Snack_ID']
sname = request.form['SName']
sprice = request.form['SPrice']
snack_status = request.form['Snack_Status']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_update1 = '''UPDATE Snacks SET SName='%s', SPrice='%s', Snack_Status='%s' WHERE S_ID='%s' ''' \
% (sname, sprice, snack_status, s_id)
print(sql_update1)
cur.execute(sql_update1)
db.commit()
db.close()
return render_template('adminmain.html')
@app.route('/addsnack/', methods=['GET', 'POST'])  # add a snack
def addsnack():
if request.method == 'GET':
return render_template('addsnack.html')
else:
s_id = request.form['Snack_ID']
sname = request.form['SName']
sprice = request.form['SPrice']
snack_status = request.form['Snack_Status']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur = db.cursor()
sql_insert1 = '''INSERT INTO Snacks VALUES ('%s', '%s', '%s', '%s')''' % (s_id, sname, sprice, snack_status)
cur.execute(sql_insert1)
db.commit()
db.close()
return render_template('close.html')
# ######## Customer side ######## #
@app.route('/userlogin/', methods=['GET', 'POST'])  # customer login page
def userlogin():
if request.method == 'GET':
return render_template('userlogin.html')
else:
username = request.form['username']
password = request.form['password']
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='<PASSWORD>', db='cybercafe')
cur2 = db.cursor()
sql_select02 = '''SELECT Card_ID FROM Card WHERE Card_ID=('%s') AND Password=('%s')''' % (username, password)
cur2.execute(sql_select02)
return2 = cur2.fetchall()
if len(return2):
sql_select01 = '''select ca.Card_ID, cu.CName, ca.Using_Status, ca.Checkout_Status, ca.Account_Balance
FROM Card ca, Customer cu WHERE ca.C_ID=cu.C_ID AND Card_ID='%s' ''' % username
cur2.execute(sql_select01)
k = cur2.fetchall()
sql_select03 = '''select * FROM Bill WHERE Card_ID='%s' ORDER BY B_ID desc''' % username
cur2.execute(sql_select03)
j = cur2.fetchall()
sql_select04 = '''select * FROM Snacks WHERE Snack_Status='Onsale' ORDER BY S_ID asc'''
cur2.execute(sql_select04)
l = cur2.fetchall()
member = []
bills = []
snacks = []
for value in k:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
data['e'] = value[4]
member.append(data)
for value in j:
data = {}
data['a'] = value[0]
data['b'] = value[1]
data['c'] = value[2]
data['d'] = value[3]
data['e'] = value[4]
data['f'] = value[5]
data['g'] = value[6]
bills.append(data)
for value in l:
data = {}
data['a'] = value[0]
| |
default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_rel_fk_put_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:param DesignTag data:
:return: DesignTag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_rel_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_rel_fk_put`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_rel_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_tags_rel_fk_put`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DesignTag',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
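# Added usage note (hedged; it mirrors the docstring's own example style and assumes the usual
# thin wrapper without the `_with_http_info` suffix generated for each endpoint):
#   sync:  tag = api.portals_id_designs_nk_tags_rel_fk_put(id, nk, fk, data=design_tag)
#   async: thread = api.portals_id_designs_nk_tags_rel_fk_put(id, nk, fk, data=design_tag,
#                                                             callback=callback_function)
# In the async case the call returns the request thread and the callback receives the
# deserialized DesignTag once the response arrives.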
def portals_id_designs_nk_team_get(self, id, nk, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_team_get(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_team_get_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_team_get_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_team_get_with_http_info(self, id, nk, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_team_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_team_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_team_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_team_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/team'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Team',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_template_get(self, id, nk, **kwargs):
"""
Fetches belongsTo relation template.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_template_get(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_template_get_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_template_get_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_template_get_with_http_info(self, id, nk, **kwargs):
"""
Fetches belongsTo relation template.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_template_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_template_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_template_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_template_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/template'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Template',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_post(self, id, **kwargs):
"""
Creates a new instance in designs of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param Design data:
:return: Design
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_post_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_designs_post_with_http_info(id, **kwargs)
return data
def portals_id_designs_post_with_http_info(self, id, **kwargs):
"""
Creates a new instance in designs of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param Design data:
:return: Design
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got | |
# MIT License
#
# Copyright (c) 2019 TU Delft Embedded and Networked Systems Group/
# Sustainable Systems Laboratory.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Script used in generating results for the following paper
#
# @inproceedings{kortbeek_asplos2020,
# author = "Vito {Kortbeek} and <NAME> {Yildirim} and Abu {Bakar} and Jacob {Sorber}
# and Josiah {Hester} and Przemys{\l}aw {Pawe{\l}czak}",
# title = "Time-sensitive Intermittent Computing Meets Legacy Software",
# year = "2020",
# booktitle = "Proc. ASPLOS",
# address = "Lausanne, Switzerland",
# month = mar # " 16--20,",
# publisher = "ACM"
# }
from pathlib import Path
from scipy import stats
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
import sys
# Read file of survey results
results_folder = Path("../user-study-results/surveygizmo")
results_file = results_folder / "20190109034400-SurveyExport_anonymized.csv" # results used in [kortbeek_asplos2020]
results_data = pd.read_csv(results_file, keep_default_na=False)  # keep empty fields as empty strings instead of NaN
#results_data = results_data[results_data['Status'] == 'Complete'] # Remove incomplete responses
print('---------------------') # program begin marker
try:
results_data['Extended Referer']
except:
# Surveygizmo CSV export does not contain "Extended Referer" and "Extended User Agent" fields after 4 December 2019
# add these here, as columns are addressed by numbers, not by names later on
results_data.insert(8, "Extended Referer", results_data['Referer'])
results_data.insert(12, "Extended User Agent", results_data['User Agent'])
# Read file of MTurk results
# Note: all results - from MTurk and non-MTurk users - were used in the ASPLOS 2020 paper [kortbeek_asplos2020];
# feel free to experiment by choosing different user cohorts
mturk_folder = Path("../user-study-results/mturk")
mturk_file = mturk_folder / "Batch_3492166_batch_results_anonymized.csv"
mturk_data = pd.read_csv(mturk_file, keep_default_na=False)  # keep empty fields as empty strings instead of NaN
# Use only MTurk results
# results_data = results_data[results_data['Confirmation code'].isin(list(set(mturk_data['Answer.surveycode'])))]
# Exclude MTurk results
# results_data = results_data[~results_data['Confirmation code'].isin(list(set(mturk_data['Answer.surveycode'])))]
# General stats
no_responses = len(results_data.iloc[0:,1]) # pick any random column to measure number of respondents
print("No. responses:", no_responses)
print("")
# Find information about countries of respondents
unique_countries = sorted(list(set(results_data['Country'].sort_values())))
no_unique_countries = len(unique_countries)
print("Countries:", unique_countries)
print("No. Countries:", no_unique_countries)
print("")
# Find information about cities of respondents
unique_cities = sorted(list(set(results_data['City'].sort_values())))
no_unique_cities = len(unique_cities)
print("Cities:", unique_cities)
print("No. Cities:", no_unique_cities)
print("")
ind_time_explanation_noutl = results_data[results_data['Time spend on explanation'] <
results_data['Time spend on explanation'].mean() +
3 * results_data['Time spend on explanation'].std()].index.tolist()
# Find information about time spent on reading `Explanation` section of the survey
time_explanation_avg = results_data['Time spend on explanation'][ind_time_explanation_noutl].mean()/60
time_explanation_std = results_data['Time spend on explanation'][ind_time_explanation_noutl].std()/60
time_explanation_min = results_data['Time spend on explanation'][ind_time_explanation_noutl].min()/60
time_explanation_max = results_data['Time spend on explanation'][ind_time_explanation_noutl].max()/60
print("Avg Time Spent on `Explanation` (s):", time_explanation_avg)
print("Std Time Spent on `Explanation` (s):", time_explanation_std)
print("")
# Find information about finding bug in `swap' program with `checkpoints`
correct_line_swap_c = 'Line 9' # correct line for `swap' program with `checkpoints`
line_swap_c = results_data.iloc[0:,23] # address by column number, not by name - name is too long
# (Note: works for surveygizmo exports before 4 December 2019 -
# "Extended referer" and "Extended user agent" column does not exist any more)
no_correct_line_swap_c = len([x for x in list(line_swap_c) if correct_line_swap_c in x]) # number of correct responses
print("No. Correct Responses (Swap/Chechpoint):", no_correct_line_swap_c)
print("Correct Responses (Swap/Chechpoint) (%):", no_correct_line_swap_c / no_responses * 100)
correct_expr_swap_c = 'a=a-b' # correct expression for `swap' program with `checkpoints`
expr_swap_c = list(results_data.iloc[0:,24]) # address by column number, not by name - name is too long
# (Note: see comment for line_swap_c eariler)
no_correct_expr_swap_c = 0
for i in range(0, len(expr_swap_c)):
expr_swap_c[i] = expr_swap_c[i].replace(' ', '') # remove spaces
expr_swap_c[i] = expr_swap_c[i].replace(';', '') # remove ";" (we assume to accept missing ";")
if bool(re.search(correct_expr_swap_c, expr_swap_c[i])):
no_correct_expr_swap_c = no_correct_expr_swap_c + 1
print("No. Correct Expressions (Swap/Chechpoint):", no_correct_line_swap_c)
print("Correct Expressions (Swap/Chechpoint) (%):", no_correct_line_swap_c / no_responses * 100)
# Find all values except outlier
ind_swap_c_noutl = results_data[results_data['Time spent swap checkpoint'] <
results_data['Time spent swap checkpoint'].mean() +
3 * results_data['Time spent swap checkpoint'].std()].index.tolist()
time_swap_c_avg = results_data['Time spent swap checkpoint'][ind_swap_c_noutl].mean()/60
time_swap_c_std = results_data['Time spent swap checkpoint'][ind_swap_c_noutl].std()/60
time_swap_c_min = results_data['Time spent swap checkpoint'][ind_swap_c_noutl].min()/60
time_swap_c_max = results_data['Time spent swap checkpoint'][ind_swap_c_noutl].max()/60
print("Avg Time Spent on `Swap/Chechpoint` (s):", time_swap_c_avg)
print("Std Time Spent on `Swap/Chechpoint` (s):", time_swap_c_std)
print("")
# # Find information about finding bug in `swap' program with `tasks`
correct_line_swap_t = 'Line 25' # correct line for `swap' program with `tasks`
line_swap_t = results_data.iloc[0:,26] # address by column number, not by name - name is too long
# (Note: see comment for line_swap_c eariler)
no_correct_line_swap_t = len([x for x in list(line_swap_t) if correct_line_swap_t in x]) # number of correct responses
print("No. Correct Responses (Swap/Task):", no_correct_line_swap_t)
print("Correct Responses (Swap/Task) (%):", no_correct_line_swap_t / no_responses * 100)
correct_expr_swap_t = 'SET\(a,GET\(a\)-GET\(b\)\)' # correct expression for `swap' program with `tasks`
expr_swap_t = list(results_data.iloc[0:,27]) # address by column number, not by name - name is too long
# (Note: see comment for line_swap_c eariler)
no_correct_expr_swap_t = 0
for i in range(0, len(expr_swap_t)):
expr_swap_t[i] = expr_swap_t[i].replace(' ', '') # remove spaces
expr_swap_t[i] = expr_swap_t[i].replace(';', '') # remove ";" (we assume to accept missing ";")
if bool(re.search(correct_expr_swap_t, expr_swap_t[i], re.IGNORECASE)):
no_correct_expr_swap_t = no_correct_expr_swap_t + 1
print("No. Correct Expressions (Swap/Task):", no_correct_expr_swap_t)
print("Correct Expressions (Swap/Task) (%):", no_correct_expr_swap_t / no_responses * 100)
# Find all values except outlier
ind_swap_t_noutl = results_data[results_data['Time spent swap task'] <
results_data['Time spent swap task'].mean() +
3 * results_data['Time spent swap task'].std()].index.tolist()
time_swap_t_avg = results_data['Time spent swap task'][ind_swap_t_noutl].mean()/60
time_swap_t_std = results_data['Time spent swap task'][ind_swap_t_noutl].std()/60
time_swap_t_min = results_data['Time spent swap task'][ind_swap_t_noutl].min()/60
time_swap_t_max = results_data['Time spent swap task'][ind_swap_t_noutl].max()/60
print("Avg Time Spent on `Swap/Task` (s):", time_swap_t_avg)
print("Std Time Spent on `Swap/Task` (s):", time_swap_t_std)
print("")
# Find information about finding bug in `Bubble sort' program with `checkpoints`
correct_line_bubble_c = 'Line 16'  # correct line for `Bubble sort' program with `checkpoints`
line_bubble_c = results_data.iloc[0:,29] # address by column number, not by name - name is too long
# (Note: see comment for line_swap_c eariler)
no_correct_line_bubble_c = len([x for x in list(line_bubble_c) if correct_line_bubble_c in x]) # number of correct responses
print("No. Correct Responses (Bubble/Chechpoint):", no_correct_line_bubble_c)
print("Correct Responses (Bubble/Chechpoint) (%):", no_correct_line_bubble_c / no_responses * 100)
correct_expr_bubble_c = 'i\+\+'  # correct expression for `Bubble sort' program with `checkpoints`
expr_bubble_c = list(results_data.iloc[0:,30]) # address by column number, not by name - name is too long
no_correct_expr_bubble_c = 0
for i in range(0, len(expr_bubble_c)):
expr_bubble_c[i] = expr_bubble_c[i].replace(' ', '') # remove spaces
expr_bubble_c[i] = expr_bubble_c[i].replace(';', '') # remove ";" (we assume to accept missing ";")
if bool(re.search(correct_expr_bubble_c, expr_bubble_c[i])):
no_correct_expr_bubble_c = no_correct_expr_bubble_c + 1
print("No. Correct Expressions (Bubble/Chechpoint):", no_correct_line_bubble_c)
print("Correct Expressions (Bubble/Chechpoint) (%):", no_correct_line_bubble_c / no_responses * 100)
# Find all values except outlier
ind_bubble_c_noutl = results_data[results_data['Time spent bubble sort checkpoint'] <
results_data['Time spent bubble sort checkpoint'].mean() +
3 * results_data['Time spent bubble sort checkpoint'].std()].index.tolist()
time_bubble_c_avg = results_data['Time spent bubble sort checkpoint'][ind_bubble_c_noutl].mean()/60
time_bubble_c_std = results_data['Time spent bubble sort checkpoint'][ind_bubble_c_noutl].std()/60
time_bubble_c_min = results_data['Time spent bubble sort checkpoint'][ind_bubble_c_noutl].min()/60
time_bubble_c_max = results_data['Time spent bubble sort checkpoint'][ind_bubble_c_noutl].max()/60
print("Avg Time Spent on `Bubble/Chechpoint` (s):", time_bubble_c_avg)
print("Std Time Spent on `Bubble/Chechpoint` (s):", time_bubble_c_std)
print("")
# Find information about finding bug in `Bubble sort' program with `tasks`
correct_line_bubble_t = 'Line 48'  # correct line for `Bubble sort' program with `tasks`
line_bubble_t = results_data.iloc[0:,32] # address by column number, not by name - name is too long
no_correct_line_bubble_t = len([x for x in list(line_bubble_t) if correct_line_bubble_t in x]) # number of correct responses
print("No. Correct Responses (Bubble/Task):", no_correct_line_bubble_t)
print("Correct Responses (Bubble/Task) (%):", no_correct_line_bubble_t / no_responses * 100)
correct_expr_bubble_t = 'returntask_array_loop_incr'  # correct expression for `Bubble sort' program with `tasks`
expr_bubble_t = list(results_data.iloc[0:,33]) # address by column number, not by name - name is too long
no_correct_expr_bubble_t = 0
for i in range(0, len(expr_bubble_t)):
expr_bubble_t[i] = expr_bubble_t[i].replace(' ', '') # remove spaces
expr_bubble_t[i] = expr_bubble_t[i].replace(';', '') # remove ";" (we assume to accept missing ";")
if bool(re.search(correct_expr_bubble_t, expr_bubble_t[i])):
no_correct_expr_bubble_t = no_correct_expr_bubble_t + 1
print("No. Correct Expressions (Bubble/Task):", no_correct_line_bubble_t)
print("Correct Expressions (Bubble/Task) (%):", no_correct_line_bubble_t / no_responses * 100)
# Find all values except outlier
ind_bubble_t_noutl = results_data[results_data['Time spent bubble sort task'] <
results_data['Time spent bubble sort task'].mean() +
3 * results_data['Time spent bubble | |
will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`scale`). Using a dictionary allows to
set different values for the axis. If they are set to the same
ranges, different values may still be sampled per axis.
translate_percent : float or tuple of two floats or StochasticParameter or dict {"x": float/tuple/StochasticParameter, "y": float/tuple/StochasticParameter}, optional(default=1.0)
Translation in percent relative to the image
height/width (x-translation, y-translation) to use,
where 0 represents no change and 0.5 is half of the image
height/width.
* If a single float, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled from the range
a <= x <= b per image. That percent value will be used identically
for both x- and y-axis.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_percent`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
translate_px : int or tuple of two ints or StochasticParameter or dict {"x": int/tuple/StochasticParameter, "y": int/tuple/StochasticParameter}, optional(default=1.0)
Translation in
pixels.
* If a single int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled from the discrete
range [a .. b] per image. That number will be used identically
for both x- and y-axis.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_px`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
rotate : float or int or tuple of two floats/ints or StochasticParameter, optional(default=0)
Rotation in degrees (NOT radians), i.e. expected value range is
0 to 360 for positive rotations (may also be negative).
* If a float/int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled per image from the
range a <= x <= b and be used as the rotation value.
* If a StochasticParameter, then this parameter will be used to
sample the rotation value per image.
shear : float or int or tuple of two floats/ints or StochasticParameter, optional(default=0)
Shear in degrees (NOT radians), i.e. expected value range is
0 to 360 for positive shear (may also be negative).
* If a float/int, then that value will be used for all images.
* If a tuple (a, b), then a value will be sampled per image from the
range a <= x <= b and be used as the shear value.
* If a StochasticParameter, then this parameter will be used to
sample the shear value per image.
order : int or iterable of int or ia.ALL or StochasticParameter, optional(default=1)
Interpolation order to use. Same meaning as in
skimage:
* 0: Nearest-neighbor
* 1: Bi-linear (default)
* 2: Bi-quadratic (not recommended by skimage)
* 3: Bi-cubic
* 4: Bi-quartic
* 5: Bi-quintic
Methods 0 and 1 are fast, 3 is a bit slower, 4 and 5 are very
slow.
* If a single int, then that order will be used for all images.
* If an iterable, then for each image a random value will be sampled
from that iterable (i.e. list of allowed order values).
* If ia.ALL, then equivalent to list [0, 1, 3, 4, 5].
* If StochasticParameter, then that parameter is queried per image
to sample the order value to use.
cval : number or tuple of two number or ia.ALL or StochasticParameter, optional(default=0)
The constant value used for skimage's transform function.
This is the value used to fill up pixels in the result image that
didn't exist in the input image (e.g. when translating to the left,
some new pixels are created at the right). Such a fill-up with a
constant value only happens, when `mode` is "constant".
For standard uint8 images (value range 0-255), this value may also
come from the range 0-255. It may be a float value, even for
integer image dtypes.
* If this is a single int or float, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple (a, b), then a random value from the range a <= x <= b
is picked per image.
* If ia.ALL, a value from the discrete range [0 .. 255] will be
sampled per image.
* If a StochasticParameter, a new value will be sampled from the
parameter per image.
mode : string or list of string or ia.ALL or StochasticParameter, optional(default="constant")
Parameter that defines the handling of newly created pixels.
Same meaning as in skimage (and numpy.pad):
* "constant": Pads with a constant value
* "edge": Pads with the edge values of array
* "symmetric": Pads with the reflection of the vector mirrored
along the edge of the array.
* "reflect": Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
* "wrap": Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end values
are used to pad the beginning.
The datatype of the parameter may
be:
* If a single string, then that mode will be used for all images.
* If a list of strings, then per image a random mode will be picked
from that list.
* If ia.ALL, then a random mode from all possible modes will be
picked.
* If StochasticParameter, then the mode will be sampled from that
parameter per image, i.e. it must return only the above mentioned
strings.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Affine(scale=2.0)
zooms all images by a factor of 2.
>>> aug = iaa.Affine(translate_px=16)
translates all images on the x- and y-axis by 16 pixels (to the
right/top), fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_percent=0.1)
translates all images on the x- and y-axis by 10 percent of their
width/height (to the right/top), fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(rotate=35)
rotates all images by 35 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(shear=15)
shears all images by 15 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(translate_px=(-16, 16))
translates all images on the x- and y-axis by a random value
between -16 and 16 pixels (to the right/top) (same for both axis, i.e.
sampled once per image), fills up any new pixels with zero (black values).
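>>> aug = iaa.Affine(scale={"x": (0.8, 1.2), "y": (0.8, 1.2)})
(added example, hedged: it simply applies the dict convention described for `scale`
above) scales all images to 80-120 percent of their size, with the x- and y-axis
factors sampled independently per image, so images may appear stretched or squashed.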
>>> aug = iaa.Affine(translate_px={"x": (-16, 16), "y": (-4, 4)})
translates all images on the x-axis by a random value
between -16 and 16 pixels (to the right) and on the y-axis by a
random value between -4 and 4 pixels to the top. Even if both ranges
| |
== 0
return_node = expand_func(game, parent)
assert len(botbowl.D6.FixedRolls) == 0
game.revert(parent.step_nbr)
return return_node
try:
with only_fixed_rolls(game):
game.step()
except AttributeError as e:
raise e
action_node = ActionNode(game, parent)
game.revert(parent.step_nbr)
assert parent.step_nbr == game.get_step()
return action_node
def expand_throw_in(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
active_proc: procedures.ThrowIn = game.get_procedure()
assert type(active_proc) is procedures.ThrowIn
d6_fixes = []
d3_fixes = [2] # direction roll
if game.config.throw_in_dice == "2d6":
d6_fixes = [3, 4]
elif game.config.throw_in_dice == "d6":
d6_fixes = [4]
elif game.config.throw_in_dice == "d3":
d3_fixes.append(1)  # distance roll is sampled after the direction roll
with only_fixed_rolls(game, d3=d3_fixes, d6=d6_fixes):
game.step()
assert active_proc is not game.get_procedure()
return expand_none_action(game, parent)
def expand_bounce(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
active_proc: procedures.Bounce = game.get_procedure()
assert type(active_proc) is procedures.Bounce
new_parent = ChanceNode(game, parent)
ball_pos = active_proc.piece.position
# todo: consider ball bouncing out.
sq_to_num_tz = {}
for sq in game.get_adjacent_squares(ball_pos, occupied=False, out=True):
if sq.out_of_bounds:
sq_to_num_tz[sq] = 'out'
else:
home_tz = len(game.get_adjacent_players(sq, team=game.state.home_team, standing=True))
away_tz = len(game.get_adjacent_players(sq, team=game.state.away_team, standing=True))
sq_to_num_tz[sq] = (home_tz, away_tz)
num_squares = len(sq_to_num_tz)
if not (num_squares > 0):
raise AssertionError(f"num_squares should be non-zero! ball_pos={ball_pos}")
num_tz_to_sq = {}
for sq, num_tz in sq_to_num_tz.items():
num_tz_to_sq.setdefault(num_tz, []).append(sq)
for num_tz, count in collections.Counter(sq_to_num_tz.values()).items():
possible_squares = num_tz_to_sq[num_tz]
square = np.random.choice(possible_squares, 1)[0]
roll = botbowl.D8.d8_from_xy[(square.x - ball_pos.x, square.y - ball_pos.y)]
expand_with_fixes(game, new_parent, probability=count / num_squares, d8=[roll])
assert game.get_step() == new_parent.step_nbr
sum_prob = sum(new_parent.child_probability)
# new_parent.child_probability = [prob/sum_prob for prob in new_parent.child_probability]
assert sum(new_parent.child_probability) == approx(1.0, abs=1e-9)
assert game.get_step() == new_parent.step_nbr
return new_parent
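# Added note (hedged, not part of the original search code): expand_bounce limits branching by
# grouping the up-to-8 candidate squares by their (home, away) tackle-zone counts (or 'out'),
# expanding one randomly chosen representative square per group, and weighting that child by
# count / num_squares. E.g. 5 free squares split into groups of 3 and 2 give child
# probabilities [3/5, 2/5], which is why the probabilities are asserted to sum to ~1.0 above.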
def expand_pickup(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
active_proc: procedures.Pickup = game.get_procedure()
assert type(active_proc) is procedures.Pickup
assert active_proc.roll is None
probability_success = game.get_pickup_prob(active_proc.player, active_proc.ball.position)
new_parent = ChanceNode(game, parent)
# SUCCESS SCENARIO
with only_fixed_rolls(game, d6=[6]):
game.step()
success_node = expand_none_action(game, new_parent, pickup_handled=True)
new_parent.connect_child(success_node, probability_success)
assert game.get_step() == new_parent.step_nbr
# FAILURE SCENARIO
fixes = [1]
if active_proc.player.has_skill(Skill.SURE_HANDS):
fixes.append(1)
with only_fixed_rolls(game, d6=fixes):
while len(botbowl.D6.FixedRolls) > 0:
game.step()
fail_node = expand_none_action(game, new_parent, pickup_handled=True)
new_parent.connect_child(fail_node, 1 - probability_success)
assert game.get_step() == new_parent.step_nbr
return new_parent
def expand_moving(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
active_proc: Union[procedures.GFI, procedures.Dodge] = game.get_procedure()
assert type(active_proc) is procedures.Dodge or type(active_proc) is procedures.GFI
move_action_proc: procedures.MoveAction = first(proc for proc in reversed(game.state.stack.items)
if isinstance(proc, procedures.MoveAction))
is_blitz = type(move_action_proc) is procedures.BlitzAction
is_handoff = type(move_action_proc) is procedures.HandoffAction
player = move_action_proc.player
if move_action_proc.steps is not None:
final_step = move_action_proc.steps[-1]
else:
if is_blitz:
block_proc: procedures.Block = first(
filter(lambda proc: type(proc) is procedures.Block, game.state.stack.items))
final_step = block_proc.defender.position
elif is_handoff:
raise ValueError()
else:
final_step = active_proc.position
is_pickup = game.get_ball().position == final_step and not game.get_ball().is_carried
path = move_action_proc.paths[final_step]
if len(path.rolls) != len(path.steps):
raise AssertionError("wrong!")
"""
This block of code sets two important variables:
probability_success - probability of the remaining path
rolls - list[int] - the remaining rolls of the path
Normal case we just fetch this from the path object. If we're in a rerolled proc, it's nasty...
"""
if active_proc.roll is None:
probability_success = path.prob
rolls = list(collapse(path.rolls))
if is_pickup:
# remove the pickup roll and probability
rolls.pop()
probability_success /= game.get_pickup_prob(active_proc.player, final_step)
else:
with only_fixed_rolls(game):
game.step()
new_proc = game.get_procedure()
if type(new_proc) not in {procedures.GFI, procedures.Dodge}:
assert not active_proc.reroll.use_reroll
return expand_none_action(game, parent)
# if we get here, it means that a reroll was used.
assert new_proc is active_proc
assert active_proc.roll is None
assert active_proc.reroll is None
current_step = active_proc.position
try:
assert player.position.distance(current_step) == 1 or is_pickup or is_blitz
except AssertionError as e:
raise e
i = 0
while path.steps[i] != current_step:
i += 1
remaining_current_step_rolls = path.rolls[i][:]
if is_pickup and current_step == final_step:
remaining_current_step_rolls.pop()
num_current_step_remaining_rolls = 0
gfi_proc = game.get_proc(procedures.GFI)
dodge_proc = game.get_proc(procedures.Dodge)
block_proc = game.get_proc(procedures.Block)
if dodge_proc is not None:
num_current_step_remaining_rolls += 1
if gfi_proc is not None and block_proc is None:
num_current_step_remaining_rolls += 1
remaining_current_step_rolls = remaining_current_step_rolls[
len(remaining_current_step_rolls) - num_current_step_remaining_rolls:]
probability_success = reduce(operator.mul, map(lambda d: (7 - d) / 6, remaining_current_step_rolls), 1.0)
rolls = list(collapse(remaining_current_step_rolls))
if current_step != final_step:
step_count = game.get_step()
if block_proc is not None:
player.state.moves -= 1
if player.position != current_step:
try:
game.move(player, current_step)
except AssertionError as e:
raise e
new_path = pf.get_safest_path(game, player, final_step, blitz=is_blitz)
game.revert(step_count)
# try:
# # assert new_path.steps == path.steps[-len(new_path):] this assert can't be made because of small randomness in pathfinder
# assert list(collapse(new_path.rolls)) == list(collapse(path.rolls[-len(new_path):])), f"{new_path.rolls} != {path.rolls[-len(new_path):]}"
# except AssertionError as e:
# raise e
try:
if new_path is not None:
rolls.extend(collapse(new_path.rolls))
probability_success *= new_path.prob
except AttributeError as e:
raise e
if is_pickup:
# remove the pickup roll and probability
rolls.pop()
probability_success /= game.get_pickup_prob(active_proc.player, final_step)
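    # Pick which of the remaining rolls becomes the failure point in the
    # failure branch, weighted towards higher roll targets (the rolls that are
    # most likely to fail).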
try:
p = np.array(rolls) / sum(rolls)
index_of_failure = np.random.choice(range(len(rolls)), 1, p=p)[0]
except ValueError as e:
raise e
# STEP UNTIL FAILURE (possibly no steps at all)
with only_fixed_rolls(game, d6=[6] * index_of_failure):
while len(botbowl.D6.FixedRolls) > 0:
if len(game.get_available_actions()) > 0:
raise AttributeError("wrong")
game.step()
new_parent = ChanceNode(game, parent)
debug_step_count = game.get_step()
# SUCCESS SCENARIO
with only_fixed_rolls(game, d6=[6] * (len(rolls) - index_of_failure)):
while len(botbowl.D6.FixedRolls) > 0:
if type(game.get_procedure()) not in {procedures.GFI, procedures.Block, procedures.Dodge, procedures.Move,
procedures.MoveAction, procedures.BlitzAction, procedures.HandoffAction}:
raise AttributeError("wrong")
if len(game.get_available_actions()) > 0:
raise AttributeError("wrong")
if type(game.get_procedure()) is procedures.Block and not game.get_procedure().gfi:
raise AttributeError("wrong")
game.step()
success_node = expand_none_action(game, new_parent, moving_handled=True)
new_parent.connect_child(success_node, probability_success)
assert debug_step_count == game.get_step()
# FAILURE SCENARIO
fail_rolls = [1]
if type(game.get_procedure()) is procedures.Dodge and player.can_use_skill(Skill.DODGE):
fail_rolls.append(1)
with only_fixed_rolls(game, d6=fail_rolls):
while len(botbowl.D6.FixedRolls) > 0:
if len(game.get_available_actions()) > 0:
raise AttributeError("wrong")
game.step()
if type(game.get_procedure()) is procedures.Reroll and len(game.get_available_actions()) == 0:
with only_fixed_rolls(game):
game.step()
    if type(game.get_procedure()) in {procedures.Dodge, procedures.GFI}:
raise ValueError()
fail_node = expand_none_action(game, new_parent, moving_handled=True)
new_parent.connect_child(fail_node, 1 - probability_success)
assert debug_step_count == game.get_step()
return new_parent
def expand_armor(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
proc: procedures.Armor = game.get_procedure()
assert not proc.foul
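    # accumulated_prob_2d_roll is assumed to map a target k to P(2d6 >= k)
    # (it is defined elsewhere in this module), so indexing with av + 1 gives
    # the chance of rolling strictly above the armour value.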
p_armorbreak = accumulated_prob_2d_roll[proc.player.get_av() + 1]
new_parent = ChanceNode(game, parent)
expand_with_fixes(game, new_parent, p_armorbreak, d6=[6, 6]) # Armor broken
expand_with_fixes(game, new_parent, 1 - p_armorbreak, d6=[1, 1]) # Armor not broken
return new_parent
def expand_injury(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
proc: procedures.Injury = game.get_procedure()
assert not proc.foul
if proc.in_crowd:
with only_fixed_rolls(game, d6=[5, 4]): # straight to KO
game.step()
return expand_none_action(game, parent)
p_removal = accumulated_prob_2d_roll[8]
new_parent = ChanceNode(game, parent)
expand_with_fixes(game, new_parent, p_removal, d6=[5, 4]) # KO
expand_with_fixes(game, new_parent, 1 - p_removal, d6=[1, 1]) # Stun
return new_parent
def expand_block(game: botbowl.Game, parent: Node) -> Node:
proc: botbowl.Block = game.get_procedure()
assert type(proc) is botbowl.Block
assert not proc.gfi, "Can't handle GFI:s here =( "
assert proc.roll is None
attacker: botbowl.Player = proc.attacker
defender: botbowl.Player = proc.defender
dice = game.num_block_dice(attacker, defender)
num_dice = abs(dice)
# initialize as 1d block without skills
dice_outcomes = np.array([2, 2, 1, 1], dtype=int)
DEF_DOWN, NOONE_DOWN, ALL_DOWN, ATT_DOWN = (0, 1, 2, 3)
die_results = ([BBDieResult.DEFENDER_DOWN, BBDieResult.DEFENDER_STUMBLES],
[BBDieResult.PUSH],
[BBDieResult.BOTH_DOWN],
[BBDieResult.ATTACKER_DOWN])
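    # A block die has six faces: Defender Down, Defender Stumbles, two Push,
    # Both Down and Attacker Down. Grouped by outcome for the attacker this
    # gives the face counts [2, 2, 1, 1] above; the skill checks below move
    # faces between groups.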
who_has_block = (attacker.has_skill(Skill.BLOCK), defender.has_skill(Skill.BLOCK))
if any(who_has_block):
dice_outcomes[ALL_DOWN] = 0
die_results[ALL_DOWN].clear()
if who_has_block == (True, True): # both
dice_outcomes[NOONE_DOWN] += 1
die_results[NOONE_DOWN].append(BBDieResult.BOTH_DOWN)
elif who_has_block == (True, False): # only attacker
dice_outcomes[DEF_DOWN] += 1
die_results[DEF_DOWN].append(BBDieResult.BOTH_DOWN)
elif who_has_block == (False, True): # only defender
dice_outcomes[ATT_DOWN] += 1
die_results[ATT_DOWN].append(BBDieResult.BOTH_DOWN)
crowd_surf: bool = game.get_push_squares(attacker.position, defender.position)[0].out_of_bounds
if crowd_surf:
dice_outcomes[DEF_DOWN] += 2
dice_outcomes[NOONE_DOWN] -= 2
die_results[DEF_DOWN].append(BBDieResult.PUSH)
die_results[NOONE_DOWN].remove(BBDieResult.PUSH)
elif defender.has_skill(Skill.DODGE): # and not attacker.has_skill(Skill.TACKLE):
dice_outcomes[DEF_DOWN] -= 1
dice_outcomes[NOONE_DOWN] += 1
die_results[DEF_DOWN].remove(BBDieResult.DEFENDER_STUMBLES)
die_results[NOONE_DOWN].append(BBDieResult.DEFENDER_STUMBLES)
prob = np.zeros(4)
probability_left = 1.0
available_dice = 6
evaluation_order = [DEF_DOWN, NOONE_DOWN, ALL_DOWN, ATT_DOWN]
if dice < 0:
evaluation_order = reversed(evaluation_order)
for i in evaluation_order:
prob[i] = probability_left * (1 - (1 - dice_outcomes[i] / available_dice) ** num_dice)
available_dice -= dice_outcomes[i]
probability_left -= prob[i]
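    # prob[i] = P(no die landed in an earlier group of the evaluation order) *
    #           P(at least one of num_dice dice lands in group i, given that),
    # i.e. the chance that the chooser's preferred result falls in group i.
    # When dice < 0 the defender picks, hence the reversed evaluation order.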
assert available_dice == 0 and probability_left == approx(0) and prob.sum() == approx(1)
new_parent = ChanceNode(game, parent)
    for outcome_prob, die_res in zip(prob, die_results):
        if outcome_prob == approx(0) or len(die_res) == 0:
            assert outcome_prob == approx(0) and len(die_res) == 0
            continue
        expand_with_fixes(game, new_parent, outcome_prob,
                          block_dice=np.random.choice(die_res, num_dice))
assert sum(new_parent.child_probability) == approx(1.0)
return new_parent
def expand_catch(game: botbowl.Game, parent: Node) -> Node:
# noinspection PyTypeChecker
proc: procedures.Catch = game.get_procedure()
assert type(proc) is procedures.Catch
if not proc.player.can_catch():
with only_fixed_rolls(game):
game.step()
assert game.get_procedure() is not proc
return expand_none_action(game, parent)
if proc.roll is not None:
with only_fixed_rolls(game):
game.step()
if game.get_procedure() is not proc:
        # If the catch proc was removed from the stack, we just
'Hexacom'},
'9173008':{'en': 'Hexacom'},
'55839932':{'en': 'Claro BR'},
'65913':{'en': 'SingTel'},
'55839930':{'en': 'Claro BR'},
'55839931':{'en': 'Claro BR'},
'65916':{'en': 'StarHub'},
'65917':{'en': 'SingTel'},
'65914':{'en': 'StarHub'},
'65915':{'en': 'SingTel'},
'558799639':{'en': 'TIM'},
'558799638':{'en': 'TIM'},
'9177658':{'en': 'Airtel'},
'558799633':{'en': 'TIM'},
'558799632':{'en': 'TIM'},
'558799631':{'en': 'TIM'},
'558799637':{'en': 'TIM'},
'558799636':{'en': 'TIM'},
'558799635':{'en': 'TIM'},
'558799634':{'en': 'TIM'},
'9173719':{'en': 'Idea'},
'556998465':{'en': 'Brasil Telecom GSM'},
'556998467':{'en': 'Brasil Telecom GSM'},
'556998466':{'en': 'Brasil Telecom GSM'},
'556998461':{'en': 'Brasil Telecom GSM'},
'556998463':{'en': 'Brasil Telecom GSM'},
'556998462':{'en': 'Brasil Telecom GSM'},
'57310':{'en': 'Claro'},
'8536694':{'en': '3'},
'8536695':{'en': '3'},
'8536696':{'en': 'CTM'},
'8536697':{'en': '3'},
'8536690':{'en': 'Kong Seng'},
'8536691':{'en': 'Kong Seng'},
'8536692':{'en': 'CTM'},
'8536693':{'en': 'CTM'},
'62218964':{'en': 'Esia'},
'62218965':{'en': 'Esia'},
'62218966':{'en': 'Esia'},
'62218960':{'en': 'Esia'},
'62218961':{'en': 'Esia'},
'62218962':{'en': 'Esia'},
'559399156':{'en': 'Vivo'},
'9181770':{'en': 'Tata Docomo'},
'9181778':{'en': 'Telewings'},
'918169':{'en': 'Reliance Jio'},
'57312':{'en': 'Claro'},
'918168':{'en': 'Reliance Jio'},
'85577':{'en': 'Cellcard'},
'85570':{'en': 'Smart'},
'85571':{'en': 'Metfone'},
'85578':{'en': 'Cellcard'},
'9175897':{'en': 'Reliance Jio'},
'9181820':{'en': 'Dishnet'},
'56972':{'en': 'Claro'},
'658399':{'en': 'SingTel'},
'658653':{'en': 'SingTel'},
'658398':{'en': 'SingTel'},
'9181198':{'en': 'Hexacom'},
'556398409':{'en': 'Brasil Telecom GSM'},
'658393':{'en': 'SingTel'},
'918299':{'en': 'Reliance Jio'},
'6011209':{'en': 'XOX'},
'6011208':{'en': 'XOX'},
'557499123':{'en': 'TIM'},
'557499122':{'en': 'TIM'},
'557499125':{'en': 'TIM'},
'557499124':{'en': 'TIM'},
'6226391':{'en': 'Esia'},
'6011201':{'en': 'Talk Focus'},
'6011200':{'en': 'Talk Focus'},
'6011203':{'en': 'Talk Focus'},
'6011202':{'en': 'Talk Focus'},
'6011205':{'en': 'XOX'},
'6011204':{'en': 'Talk Focus'},
'6011207':{'en': 'XOX'},
'6011206':{'en': 'XOX'},
'556398408':{'en': 'Brasil Telecom GSM'},
'9176529':{'en': 'CellOne'},
'658396':{'en': 'StarHub'},
'5699785':{'en': 'Entel'},
'5699784':{'en': 'Entel'},
'555399911':{'en': 'TIM'},
'5699786':{'en': 'Entel'},
'5699781':{'en': 'Movistar'},
'5699780':{'en': 'Movistar'},
'5699783':{'en': 'Movistar'},
'5699782':{'en': 'Movistar'},
'9183840':{'en': 'Reliance Jio'},
'5699789':{'en': 'Entel'},
'5699788':{'en': 'Entel'},
'8536640':{'en': 'SmarTone'},
'559498133':{'en': 'TIM'},
'556499907':{'en': 'Vivo'},
'8536645':{'en': '3'},
'8536644':{'en': '3'},
'559898161':{'en': 'TIM'},
'559898162':{'en': 'TIM'},
'559898163':{'en': 'TIM'},
'559898164':{'en': 'TIM'},
'559898165':{'en': 'TIM'},
'559898166':{'en': 'TIM'},
'559898167':{'en': 'TIM'},
'559898168':{'en': 'TIM'},
'559898169':{'en': 'TIM'},
'6236299':{'en': 'Esia'},
'569620':{'en': 'Entel'},
'569621':{'en': 'Entel'},
'569622':{'en': 'Entel'},
'569623':{'en': 'Entel'},
'569624':{'en': 'Entel'},
'569625':{'en': 'Claro'},
'569626':{'en': 'Claro'},
'569627':{'en': 'Claro'},
'569628':{'en': 'Movistar'},
'569629':{'en': 'Movistar'},
'559999173':{'en': 'Vivo'},
'6231931':{'en': 'Esia'},
'559999172':{'en': 'Vivo'},
'559498137':{'en': 'TIM'},
'556498136':{'en': 'TIM'},
'85264511':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'85264510':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'559999176':{'en': 'Vivo'},
'9173490':{'en': 'Airtel'},
'9173491':{'en': 'Airtel'},
'9173492':{'en': 'Airtel'},
'9173493':{'en': 'Airtel'},
'9173494':{'en': 'Airtel'},
'556498132':{'en': 'TIM'},
'9173496':{'en': 'Airtel'},
'9173497':{'en': 'Airtel'},
'9173498':{'en': 'Vodafone'},
'9173499':{'en': 'Vodafone'},
'559999174':{'en': 'Vivo'},
'9177588':{'en': 'Airtel'},
'852593':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852592':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852591':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852597':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852596':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852594':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852599':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852598':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'658649':{'en': 'SingTel'},
'9177000':{'en': 'Airtel'},
'9177009':{'en': 'Airtel'},
'9177008':{'en': 'Airtel'},
'556298118':{'en': 'TIM'},
'9173940':{'en': 'Telenor'},
'9173948':{'en': 'Telenor'},
'9173949':{'en': 'Telenor'},
'9176059':{'en': 'Airtel'},
'917827':{'en': 'Reliance'},
'917826':{'en': 'Vodafone'},
'917825':{'en': 'Vodafone'},
'917824':{'en': 'Vodafone'},
'917822':{'en': 'Reliance'},
'917821':{'en': 'Reliance'},
'917820':{'en': 'Reliance'},
'917829':{'en': 'Vodafone'},
'917828':{'en': 'Reliance'},
'559399144':{'en': 'Vivo'},
'559399145':{'en': 'Vivo'},
'559399146':{'en': 'Vivo'},
'559399147':{'en': 'Vivo'},
'559399141':{'en': 'Vivo'},
'559399142':{'en': 'Vivo'},
'559399143':{'en': 'Vivo'},
'9176058':{'en': 'Airtel'},
'559399148':{'en': 'Vivo'},
'559399149':{'en': 'Vivo'},
'62536204':{'en': 'Esia'},
'9176270':{'en': 'CellOne'},
'62536200':{'en': 'Esia'},
'62536201':{'en': 'Esia'},
'62536202':{'en': 'Esia'},
'62536203':{'en': 'Esia'},
'9181348':{'en': 'Airtel'},
'555598134':{'en': 'TIM'},
'555598135':{'en': 'TIM'},
'555598136':{'en': 'TIM'},
'555598137':{'en': 'TIM'},
'555598131':{'en': 'TIM'},
'555598132':{'en': 'TIM'},
'555598133':{'en': 'TIM'},
'555598138':{'en': 'TIM'},
'555598139':{'en': 'TIM'},
'917411':{'en': 'Tata Docomo'},
'555499628':{'en': 'Vivo'},
'555499629':{'en': 'Vivo'},
'555499624':{'en': 'Vivo'},
'555499625':{'en': 'Vivo'},
'555499626':{'en': 'Vivo'},
'555499627':{'en': 'Vivo'},
'555499621':{'en': 'Vivo'},
'555499622':{'en': 'Vivo'},
'555499623':{'en': 'Vivo'},
'557199157':{'en': 'TIM'},
'557199156':{'en': 'TIM'},
'557199155':{'en': 'TIM'},
'557199154':{'en': 'TIM'},
'557199153':{'en': 'TIM'},
'557199152':{'en': 'TIM'},
'557199151':{'en': 'TIM'},
'62823':{'en': 'Telkomsel'},
'62822':{'en': 'Telkomsel'},
'62821':{'en': 'Telkomsel'},
'917415':{'en': 'Tata Docomo'},
'557199159':{'en': 'TIM'},
'557199158':{'en': 'TIM'},
'917418':{'en': 'Tata Docomo'},
'556198568':{'en': 'Brasil Telecom GSM'},
'556198569':{'en': 'Brasil Telecom GSM'},
'598921':{'en': 'Antel'},
'598920':{'en': 'Antel'},
'601128':{'en': 'U Mobile'},
'601129':{'en': 'Celecom'},
'9174628':{'en': 'Airtel'},
'9174629':{'en': 'Airtel'},
'601124':{'en': 'Maxis'},
'601125':{'en': 'Maxis'},
'601126':{'en': 'DiGi'},
'556198563':{'en': 'Brasil Telecom GSM'},
'556198564':{'en': 'Brasil Telecom GSM'},
'601121':{'en': 'U Mobile'},
'601122':{'en': 'Clixster'},
'601123':{'en': 'Maxis'},
'556199619':{'en': 'Vivo'},
'59891':{'en': 'Antel'},
'59893':{'en': 'Movistar'},
'59894':{'en': 'Movistar'},
'59895':{'en': 'Movistar'},
'59896':{'en': 'Claro'},
'59897':{'en': 'Claro'},
'59898':{'en': 'Antel'},
'59899':{'en': 'Antel'},
'556199613':{'en': 'Vivo'},
'556199612':{'en': 'Vivo'},
'556199615':{'en': 'Vivo'},
'556199614':{'en': 'Vivo'},
'556199617':{'en': 'Vivo'},
'556199616':{'en': 'Vivo'},
'559699114':{'en': 'Vivo'},
'559699115':{'en': 'Vivo'},
'559699116':{'en': 'Vivo'},
'559699117':{'en': 'Vivo'},
'559699111':{'en': 'Vivo'},
'559699112':{'en': 'Vivo'},
'559699113':{'en': 'Vivo'},
'559699118':{'en': 'Vivo'},
'559699119':{'en': 'Vivo'},
'9181520':{'en': 'Idea'},
'852561':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'9181529':{'en': 'Idea'},
'9181528':{'en': 'Idea'},
'9177010':{'en': 'Tata Docomo'},
'9180120':{'en': 'Aircel'},
'9184119':{'en': 'Vodafone'},
'9184118':{'en': 'Vodafone'},
'658505':{'en': 'StarHub'},
'658500':{'en': 'M1'},
'658501':{'en': 'StarHub'},
'658503':{'en': 'StarHub'},
'658508':{'en': 'StarHub'},
'658509':{'en': 'StarHub'},
'558299340':{'en': 'Claro BR'},
'558299341':{'en': 'Claro BR'},
'558299342':{'en': 'Claro BR'},
'558299343':{'en': 'Claro BR'},
'557598275':{'en': 'Claro BR'},
'557598274':{'en': 'Claro BR'},
'557598276':{'en': 'Claro BR'},
'557598271':{'en': 'Claro BR'},
'557598270':{'en': 'Claro BR'},
'557598273':{'en': 'Claro BR'},
'557598272':{'en': 'Claro BR'},
'918171':{'en': 'Airtel'},
'558599943':{'en': 'TIM'},
'558599942':{'en': 'TIM'},
'558599941':{'en': 'TIM'},
'558599947':{'en': 'TIM'},
'558599946':{'en': 'TIM'},
'558599945':{'en': 'TIM'},
'558599944':{'en': 'TIM'},
'558599949':{'en': 'TIM'},
'558599948':{'en': 'TIM'},
'55849812':{'en': 'Vivo'},
'55849813':{'en': 'Vivo'},
'55849810':{'en': 'Vivo'},
'55849811':{'en': 'Vivo'},
'917225':{'en': 'Airtel'},
'917224':{'en': 'Airtel'},
'917227':{'en': 'Airtel'},
'917226':{'en': 'Airtel'},
'917221':{'en': 'Aircel'},
'917220':{'en': 'Aircel'},
'917223':{'en': 'Airtel'},
'917222':{'en': 'Aircel'},
'58412':{'en': 'Digitel GSM'},
'58416':{'en': 'Movilnet'},
'58414':{'en': 'movistar'},
'67576':{'en': 'bmobile'},
'9174868':{'en': 'Airtel'},
'559899974':{'en': 'Oi'},
'559899975':{'en': 'Oi'},
'559899976':{'en': 'Oi'},
'559899970':{'en': 'Oi'},
'559899971':{'en': 'Oi'},
'559899972':{'en': 'Oi'},
'559899973':{'en': 'Oi'},
'917049':{'en': 'Idea'},
'917048':{'en': 'Tata Docomo'},
'917041':{'en': 'Telewings'},
'917040':{'en': 'Aircel'},
'917043':{'en': 'Airtel'},
'917042':{'en': 'Airtel'},
'917045':{'en': 'Vodafone'},
'917044':{'en': 'Airtel'},
'917047':{'en': 'Airtel'},
'917046':{'en': 'Idea'},
'9175898':{'en': 'Reliance Jio'},
'9175899':{'en': 'Reliance Jio'},
'918345':{'en': 'Idea'},
'8526459':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526458':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526457':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526456':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526455':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526454':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'9175896':{'en': 'Reliance Jio'},
'8526452':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'8526450':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'918349':{'en': 'Airtel'},
'918348':{'en': 'Vodafone'},
'67575':{'en': 'bmobile'},
'658823':{'en': 'M1'},
'918223':{'en': 'Idea'},
'558899968':{'en': 'TIM'},
'558899969':{'en': 'TIM'},
'9176279':{'en': 'Hexacom'},
'9176278':{'en': 'CellOne'},
'558899962':{'en': 'TIM'},
'558899963':{'en': 'TIM'},
'558899961':{'en': 'TIM'},
'558899966':{'en': 'TIM'},
'558899967':{'en': 'TIM'},
'558899964':{'en': 'TIM'},
'558899965':{'en': 'TIM'},
'658821':{'en': 'M1'},
'6225132':{'en': 'Esia'},
'6225133':{'en': 'Esia'},
'9172958':{'en': 'Hexacom'},
'6225131':{'en': 'Esia'},
'9172950':{'en': 'Hexacom'},
'65932':{'en': 'M1'},
'65934':{'en': 'M1'},
'65936':{'en': 'M1'},
'65938':{'en': 'StarHub'},
'65939':{'en': 'SingTel'},
'9173029':{'en': 'Vodafone'},
'658820':{'en': 'M1'},
'9173021':{'en': 'Hexacom'},
'9173020':{'en': 'Hexacom'},
'9173023':{'en': 'Hexacom'},
'9173022':{'en': 'Hexacom'},
'9173025':{'en': 'Vodafone'},
'9173024':{'en': 'Hexacom'},
'9173027':{'en': 'Vodafone'},
'9173026':{'en': 'Vodafone'},
'9177638':{'en': 'Airtel'},
'9177639':{'en': 'Airtel'},
'556998449':{'en': 'Brasil Telecom GSM'},
'556998448':{'en': 'Brasil Telecom GSM'},
'556998443':{'en': 'Brasil Telecom GSM'},
'556998442':{'en': 'Brasil Telecom GSM'},
'556998441':{'en': 'Brasil Telecom GSM'},
'556998447':{'en': 'Brasil Telecom GSM'},
'556998446':{'en': 'Brasil Telecom GSM'},
'556998445':{'en': 'Brasil Telecom GSM'},
'556998444':{'en': 'Brasil Telecom GSM'},
'556699638':{'en': 'Vivo'},
'556699639':{'en': 'Vivo'},
'556699632':{'en': 'Vivo'},
'556699633':{'en': 'Vivo'},
'556699631':{'en': 'Vivo'},
'556699636':{'en': 'Vivo'},
'556699637':{'en': 'Vivo'},
'556699634':{'en': 'Vivo'},
'556699635':{'en': 'Vivo'},
'9181147':{'en': 'Airtel'},
'9176528':{'en': 'CellOne'},
'9181410':{'en': 'Vodafone'},
'9174640':{'en': 'Airtel'},
'918013':{'en': 'Aircel'},
'558999984':{'en': 'TIM'},
'9181149':{'en': 'Airtel'},
'9181758':{'en': 'Airtel'},
'9181759':{'en': 'Tata Docomo'},
'9181230':{'en': 'Tata Docomo'},
'5696770':{'en': 'Celupago'},
'5696775':{'en': 'Entel'},
'5696777':{'en': 'Entel'},
'5696776':{'en': 'Entel'},
'5696779':{'en': 'Entel'},
'5696778':{'en': 'Entel'},
'658339':{'en': 'SingTel'},
'658338':{'en': 'SingTel'},
'658333':{'en': 'M1'},
'658332':{'en': 'StarHub'},
'658331':{'en': 'StarHub'},
'658330':{'en': 'StarHub'},
'658337':{'en': 'StarHub'},
'658336':{'en': 'StarHub'},
'658335':{'en': 'StarHub'},
'658334':{'en': 'StarHub'},
'85365470':{'en': 'CTM'},
'85365471':{'en': 'CTM'},
'85365472':{'en': 'CTM'},
'85365473':{'en': 'CTM'},
'85365474':{'en': 'CTM'},
'85365475':{'en': 'SmarTone'},
'85365476':{'en': 'SmarTone'},
'85365477':{'en': 'SmarTone'},
'85365478':{'en': 'SmarTone'},
'85365479':{'en': 'SmarTone'},
'9175708':{'en': 'Vodafone'},
'9175709':{'en': 'Vodafone'},
'6243899':{'en': 'Esia'},
'9176369':{'en': 'Airtel'},
'56958':{'en': 'Movistar'},
'56959':{'en': 'Claro'},
'56954':{'en': 'Claro'},
'56956':{'en': 'Entel'},
'56957':{'en': 'Entel'},
'56950':{'en': 'Claro'},
'56953':{'en': 'Movistar'},
'556499998':{'en': 'Vivo'},
'9181570':{'en': 'Idea'},
'556499991':{'en': 'Vivo'},
'556499995':{'en': 'Vivo'},
'556499994':{'en': 'Vivo'},
'556499997':{'en': 'Vivo'},
'556499996':{'en': 'Vivo'},
'5574989':{'en': 'Oi'},
'5574988':{'en': 'Oi'},
'5574987':{'en': 'Oi'},
'5574986':{'en': 'Oi'},
'5574985':{'en': 'Oi'},
'9180800':{'en': 'Reliance'},
'658197':{'en': 'M1'},
'658196':{'en': 'M1'},
'658195':{'en': 'M1'},
'658194':{'en': 'M1'},
'658193':{'en': 'M1'},
'658192':{'en': 'M1'},
'658191':{'en': 'M1'},
'658190':{'en': 'M1'},
'6227391':{'en': 'Esia'},
'658199':{'en': 'M1'},
'658198':{'en': 'StarHub'},
'5577988':{'en': 'Oi'},
'5577989':{'en': 'Oi'},
'5577986':{'en': 'Oi'},
'5577987':{'en': 'Oi'},
'5577985':{'en': 'Oi'},
'62232933':{'en': 'Esia'},
'62232932':{'en': 'Esia'},
'62232931':{'en': 'Esia'},
'62232930':{'en': 'Esia'},
'62232937':{'en': 'Esia'},
'62232936':{'en': 'Esia'},
'62232935':{'en': 'Esia'},
'62232934':{'en': 'Esia'},
'918019':{'en': 'Tata Docomo'},
'62232938':{'en': 'Esia'},
'917698':{'en': 'Idea'},
'917699':{'en': 'Idea'},
'917694':{'en': 'Idea'},
'917695':{'en': 'Reliance'},
'917696':{'en': 'Tata Docomo'},
'917697':{'en': 'Idea'},
'917690':{'en': 'Idea'},
'917691':{'en': 'Idea'},
'917692':{'en': 'Idea'},
'917693':{'en': 'Idea'},
'918011':{'en': 'Airtel'},
'918279':{'en': 'Reliance Jio'},
'9173700':{'en': 'Idea'},
'569648':{'en': 'Movistar'},
'569649':{'en': 'Movistar'},
'569642':{'en': 'WOM'},
'569643':{'en': 'WOM'},
'569640':{'en': 'Movistar'},
'569641':{'en': 'WOM'},
'569646':{'en': 'Movistar'},
'569647':{'en': 'Movistar'},
'569644':{'en': 'WOM'},
'569645':{'en': 'WOM'},
'5598989':{'en': 'Oi'},
'55759993':{'en': 'Vivo'},
'55759992':{'en': 'Vivo'},
'55759991':{'en': 'Vivo'},
'55759990':{'en': 'Vivo'},
'55759995':{'en': 'Vivo'},
'55759994':{'en': 'Vivo'},
'9176480':{'en': 'CellOne'},
'9176488':{'en': 'CellOne'},
'9176489':{'en': 'CellOne'},
'559799179':{'en': 'Vivo'},
'559799178':{'en': 'Vivo'},
'559799171':{'en': 'Vivo'},
'559799173':{'en': 'Vivo'},
'559799172':{'en': 'Vivo'},
'559799175':{'en': 'Vivo'},
'559799174':{'en': 'Vivo'},
'559799177':{'en': 'Vivo'},
'559799176':{'en': 'Vivo'},
'9176784':{'en': 'Reliance Jio'},
'85264539':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'85264538':{'en': 'CITIC', 'zh': u('\u4e2d\u4fe1\u56fd\u9645\u7535\u8baf'), 'zh_Hant': u('\u4e2d\u4fe1\u570b\u969b\u96fb\u8a0a')},
'62216064':{'en': 'Esia'},
'62216063':{'en': 'Esia'},
'62216062':{'en': 'Esia'},
'62216061':{'en': 'Esia'},
'62216060':{'en': 'Esia'},
'9176738':{'en': 'Reliance'},
'9176739':{'en': 'Airtel'},
'917841':{'en': 'Telewings'},
'917843':{'en': 'Dishnet'},
'917842':{'en': 'Tata Docomo'},
'917845':{'en': 'Tata Docomo'},
'917844':{'en': 'Dishnet'},
'917847':{'en': 'Reliance'},
'917846':{'en': 'Reliance'},
'917849':{'en': 'Reliance'},
'917848':{'en': 'Reliance'},
'614888':{'en': 'My Number'},
'9177919':{'en': 'Aircel'},
'9177918':{'en': 'Aircel'},
'918017':{'en': 'Vodafone'},
'9175806':{'en': 'Vodafone'},
'9173278':{'en': 'Airtel'},
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2018 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tools for instruments relying on the VISA protocol.
"""
import logging
import os
from inspect import cleandoc
from time import sleep
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from pyvisa import errors
from pyvisa.highlevel import ResourceManager
from pyvisa.rname import assemble_canonical_name, to_canonical_name
from ...core import subsystem
from ...core.actions import BaseAction
from ...core.base_driver import BaseDriver
from ...core.composition import SupportMethodCustomization
from ...core.errors import I3pyInterfaceNotSupported
from ...core.features import AbstractFeature
_RESOURCE_MANAGERS = None
def get_visa_resource_manager(backend='default'):
"""Access a VISA resource manager in use by I3py.
"""
global _RESOURCE_MANAGERS
if not _RESOURCE_MANAGERS:
_RESOURCE_MANAGERS = {}
if backend not in _RESOURCE_MANAGERS:
if backend == 'default':
def_backend = os.environ.get('I3PY_VISA', '@ni')
mess = cleandoc('''Creating default Visa resource manager for I3py
with backend {}.'''.format(def_backend))
logging.debug(mess)
_RESOURCE_MANAGERS[backend] = ResourceManager(def_backend)
elif '@' in backend:
_RESOURCE_MANAGERS[backend] = ResourceManager(backend)
return _RESOURCE_MANAGERS[backend]
def set_visa_resource_manager(rm, backend='default'):
"""Set a VISA resource manager in use by I3py.
This operation can only be performed once per backend id, and should be
    performed before any driver relying on this backend is created.
Parameters
----------
rm : ResourceManager
        Instance to use as the I3py resource manager.
backend : str
Id of the backend.
"""
global _RESOURCE_MANAGERS
assert isinstance(rm, ResourceManager)
if _RESOURCE_MANAGERS and backend in _RESOURCE_MANAGERS:
msg = 'Cannot set I3py VISA resource manager once one already exists.'
raise ValueError(msg)
if not _RESOURCE_MANAGERS:
_RESOURCE_MANAGERS = {backend: rm}
else:
_RESOURCE_MANAGERS[backend] = rm
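# Illustrative usage sketch for the two helpers above (assumes the optional
# pyvisa-sim backend is installed to provide the '@sim' resource manager):
#
#     rm = ResourceManager('@sim')
#     set_visa_resource_manager(rm, backend='@sim')
#     assert get_visa_resource_manager('@sim') is rm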
class VisaFeature(SupportMethodCustomization, property):
"""Special property used to wrap a property present in a Pyvisa resource.
Visa properties are expected to be defined on the visa_resource subsystem.
"""
def __init__(self, settable=True, deleter=None):
super(VisaFeature, self).__init__(self._get,
self._set if settable else None,
deleter)
self.name = None
def clone(self):
"""Clone itself by inspecting the presence of setter/deleter.
"""
return type(self)(self.fset is not None,
self.fdel)
def create_default_settings(self):
"""A visa feature has no dynamic features.
"""
return {}
def make_doc(self, doc):
"""Do not alter the user doc.
"""
return doc
@property
def self_alias(self) -> str:
"""For features self is replaced by feat in function signature.
"""
return 'feat'
def analyse_function(self, method_name: str, func: Callable,
specifiers: Tuple[str, ...]):
"""Check the signature of the function.
"""
raise RuntimeError('VisaFeatures do not support customization.')
def _get(self, obj):
if obj.parent._resource:
return getattr(obj.parent._resource, self.name)
else:
return obj.parent.resource_kwargs.get(self.name)
def _set(self, obj, value):
obj.parent.resource_kwargs[self.name] = value
if obj.parent._resource:
setattr(obj.parent._resource, self.name, value)
AbstractFeature.register(VisaFeature)
class VisaAction(BaseAction):
"""Action used for method modifying the VISA resource state.
    By default all calls to VISA actions acquire the instrument lock to protect
the instrument.
"""
def __init__(self, **kwargs):
kwargs.setdefault('lock', True)
super().__init__(**kwargs)
def timeout_deleter(obj):
del obj.parent.resource_kwargs['timeout']
if obj.parent._resource:
del obj.parent._resource.timeout
class BaseVisaDriver(BaseDriver):
"""Base class for instrument communicating through the VISA protocol.
It handles the connection management, but not the subsequent communication.
That's why driver should not inherit from it but from one of its derived
class (save for very peculiar use).
Parameters
----------
resource_name : str, optional
Name of the visa resource. can be specified as positional argument.
backend : str, optional
The PyVISA backend to use. This can either be a backend alias declared
using set_visa_resource_manager or a valid string to create a pyvisa
resource manager.
parameters : dict, optional
A dict to alter the driver attributes.
caching_allowed : bool, optional
Boolean use to determine if instrument properties can be cached
kwargs :
Arguments that PyVISA can use to build a resource name. Those depend
        on the interface type (*interface_type* keyword); please see the PyVISA
        documentation for more details.
"""
#: Exceptions triggering a new communication attempts for Features with a
#: non zero retries values.
retries_exceptions = (TimeoutError, errors.VisaIOError, # type: ignore
errors.InvalidSession)
#: Interfaces supported by the instrument.
#: For each type of interface a dictionary (or a list of dictionary),
#: specifying the default arguments to use should be provided.
#: For example::
#:
#: {'USB': [{'resource_class': 'INSTR'},
#: {'resource_class': 'RAW'}],
#: 'TCPIP': {'resource_class': 'SOCKET',
    #:              'port': '50000'}}
INTERFACES: ClassVar[Dict[str, Union[Dict[str, str],
List[Dict[str, str]]]]] = {}
#: Default arguments passed to the Resource constructor on initialize.
#: It should be specified in two layers, the first indicating the
#: interface type and the second the corresponding arguments.
#: The key COMMON is used to indicate keywords for all interfaces.
#: For example:
#:
#: {'ASRL': {'read_termination': '\n',
#: 'baud_rate': 9600},
    #:          'USB': {'read_termination': '\r'},
#: 'COMMON': {'write_termination': '\n'}
#: }
DEFAULTS: ClassVar[Optional[Dict[str, Dict[str, Any]]]] = None
#: Tuple of keywords unrelated to Visa resource name. Used to remove them
#: from the kwargs when building the resource name.
NON_VISA_NAMES: ClassVar[Tuple[str, ...]] = ('parameters', 'backend')
def __init__(self, *args, **kwargs):
super(BaseVisaDriver, self).__init__(*args, **kwargs)
        # This entry is populated by the compute_id class method (called by
        # the metaclass) from the provided information.
r_name = kwargs['resource_name']
rm = get_visa_resource_manager(kwargs.get('backend', 'default'))
self._resource_manager = rm
# Does not work with Visa alias
r_info = self._resource_manager.resource_info(r_name)
if r_info:
#: Keyword arguments passed to the resource during initialization.
kw = self._get_defaults_kwargs(r_info.interface_type.name.upper(),
r_info.resource_class,
kwargs.get('parameters', {}))
self.resource_kwargs = kw
else:
# Allow to at least get the COMMON parameters.
kw = self._get_defaults_kwargs(None,
None,
kwargs.get('parameters', {}))
self.resource_kwargs = kw
#: The resource name
self.resource_name = r_name
# The resource will be created when the driver is initialized.
self._resource = None
@classmethod
def compute_id(cls, args, kwargs):
"""Assemble the resource name from the provided info.
"""
rname = None
if args:
msg = 'A single positional argument is allowed for %s' % cls
assert len(args) == 1, msg
rname = args[0]
elif 'resource_name' in kwargs:
rname = kwargs['resource_name']
if rname:
try:
kwargs['resource_name'] = to_canonical_name(rname)
except Exception: # TODO Use a more adequate exception
# Fail silently to allow the use of VISA alias
kwargs['resource_name'] = rname
else:
visa_infos = cls._get_visa_infos(kwargs)
kwargs['resource_name'] =\
assemble_canonical_name(**visa_infos)
return kwargs['resource_name']
@classmethod
def _get_visa_infos(cls, connection_infos):
"""Filter out non-VISA related keywords and fill the gaps using
INTERFACES
"""
interface_type = connection_infos['interface_type']
default_protocol = cls.INTERFACES.get(interface_type, {})
if not isinstance(default_protocol, dict):
default_protocol = default_protocol[0]
visa_infos = {k: v for k, v in connection_infos.items()
if k not in cls.NON_VISA_NAMES}
default_protocol.update(visa_infos)
return default_protocol
@classmethod
def _get_defaults_kwargs(cls, interface_type, resource_class,
user_kwargs):
"""Compute the default keyword arguments.
This is done by combining:
- user provided keyword arguments.
- (interface_type, resource_class) keyword arguments.
- interface_type keyword arguments.
- resource_class keyword arguments.
- common keyword arguments.
(the first ones have precedence)
Parameters
----------
interface_type : str|None, {'ASRL', 'USB', 'TCPIP', 'GPIB', 'PXI'}
Type of interface.
resource_class : str|None, {'INSTR', 'SOCKET', 'RAW'}
Class of resource.
Returns
-------
kwargs : dict
The keyword arguments to use when opening a session.
"""
if cls.DEFAULTS:
kwargs = {}
for key in ('COMMON', resource_class, interface_type,
(interface_type, resource_class)):
if key not in cls.DEFAULTS:
continue
value = cls.DEFAULTS[key]
if value is None:
msg = 'An %s instrument is not supported by the driver %s'
raise I3pyInterfaceNotSupported(msg, key, cls.__name__)
if value:
kwargs.update(value)
if user_kwargs:
kwargs.update(user_kwargs)
return kwargs
else:
return user_kwargs
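    # Illustrative example (hypothetical DEFAULTS): with
    #     DEFAULTS = {'COMMON': {'write_termination': '\n'},
    #                 'ASRL': {'baud_rate': 9600}}
    # _get_defaults_kwargs('ASRL', 'INSTR', {'baud_rate': 19200}) returns
    # {'write_termination': '\n', 'baud_rate': 19200}: user kwargs override
    # the per-interface defaults, which override COMMON.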
def initialize(self):
rm = self._resource_manager
self._resource = rm.open_resource(self.resource_name,
**self.resource_kwargs)
def finalize(self):
self._resource.close()
self._resource = None
def reopen_connection(self):
"""Close and re-open a suspicious connection.
A VISA clear command is issued after re-opening the connection to make
sure the instrument queues do not keep corrupted data. This might be
an issue with some instruments in such a case simply override this
method.
"""
self.finalize()
self.initialize()
self._resource.clear()
# Make sure the clear command completed before sending more commands.
sleep(0.3)
# --- Pyvisa wrappers
#: Direct access to the visa resource.
visa_resource = subsystem()
with visa_resource as vr:
#: The timeout in milliseconds for all resource I/O operations.
#:
#: None is mapped to VI_TMO_INFINITE.
#: A value less than 1 is mapped to VI_TMO_IMMEDIATE.
vr.timeout = VisaFeature(True, timeout_deleter)
#: Pyvisa resource info.
vr.resource_info = VisaFeature(settable=False)
#: Pyvisa interface type
vr.interface_type = VisaFeature(settable=False)
@vr
@VisaAction()
def clear(self):
"""Clears this resource.
"""
self.parent._resource.clear()
@vr
@VisaAction()
def install_handler(self, event_type, handler, user_handle=None):
"""See Pyvisa docs.
"""
return self.parent._resource.install_handler(event_type, handler,
user_handle)
@vr
@VisaAction()
def uninstall_handler(self, event_type, handler, user_handle=None):
"""See | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like rm command for cloud storage providers."""
from __future__ import absolute_import
from gslib.cloud_api import BucketNotFoundException
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import GetFailureCount
from gslib.command import ResetFailureCount
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.name_expansion import NameExpansionIterator
from gslib.storage_url import StorageUrlFromString
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.util import GetCloudApiInstance
from gslib.util import NO_MAX
from gslib.util import Retry
from gslib.util import StdinIterator
_SYNOPSIS = """
gsutil rm [-f] [-r] url...
gsutil rm [-f] [-r] -I
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil rm command removes objects.
For example, the command:
gsutil rm gs://bucket/subdir/*
will remove all objects in gs://bucket/subdir, but not in any of its
sub-directories. In contrast:
gsutil rm gs://bucket/subdir/**
will remove all objects under gs://bucket/subdir or any of its
subdirectories.
You can also use the -r option to specify recursive object deletion. Thus, for
example, either of the following two commands will remove gs://bucket/subdir
and all objects and subdirectories under it:
gsutil rm gs://bucket/subdir**
gsutil rm -r gs://bucket/subdir
The -r option will also delete all object versions in the subdirectory for
versioning-enabled buckets, whereas the ** command will only delete the live
version of each object in the subdirectory.
Running gsutil rm -r on a bucket will delete all versions of all objects in
the bucket, and then delete the bucket:
gsutil rm -r gs://bucket
If you want to delete all objects in the bucket, but not the bucket itself,
this command will work:
gsutil rm gs://bucket/**
  If you have a large number of objects to remove, you might want to use the
  gsutil -m option to perform parallel (multi-threaded/multi-processing)
  removes:
gsutil -m rm -r gs://my_bucket/subdir
You can pass a list of URLs (one per line) to remove on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to remove objects identified by a program, such as:
some_program | gsutil -m rm -I
The contents of stdin can name cloud URLs and wildcards of cloud URLs.
Note that gsutil rm will refuse to remove files from the local
file system. For example this will fail:
gsutil rm *.txt
WARNING: Object removal cannot be undone. Google Cloud Storage is designed
to give developers a high amount of flexibility and control over their data,
and Google maintains strict controls over the processing and purging of
deleted data. To protect yourself from mistakes, you can configure object
versioning on your bucket(s). See 'gsutil help versions' for details.
<B>DATA RESTORATION FROM ACCIDENTAL DELETION OR OVERWRITES</B>
Google Cloud Storage does not provide support for restoring data lost
or overwritten due to customer errors. If you have concerns that your
application software (or your users) may at some point erroneously delete or
overwrite data, you can protect yourself from that risk by enabling Object
Versioning (see "gsutil help versioning"). Doing so increases storage costs,
which can be partially mitigated by configuring Lifecycle Management to delete
older object versions (see "gsutil help lifecycle").
<B>OPTIONS</B>
-f Continues silently (without printing error messages) despite
errors when removing multiple objects. If some of the objects
could not be removed, gsutil's exit status will be non-zero even
if this flag is set. This option is implicitly set when running
"gsutil -m rm ...".
-I Causes gsutil to read the list of objects to remove from stdin.
This allows you to run a program that generates the list of
objects to remove.
-R, -r The -R and -r options are synonymous. Causes bucket or bucket
subdirectory contents (all objects and subdirectories that it
contains) to be removed recursively. If used with a bucket-only
URL (like gs://bucket), after deleting objects and subdirectories
gsutil will delete the bucket. This option implies the -a option
and will delete all object versions.
-a Delete all versions of an object.
""")
def _RemoveExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
if not cls.continue_on_error:
cls.logger.error(str(e))
# TODO: Use shared state to track missing bucket names when we get a
# BucketNotFoundException. Then improve bucket removal logic and exception
# messages.
if isinstance(e, BucketNotFoundException):
cls.bucket_not_found_count += 1
cls.logger.error(str(e))
else:
cls.op_failure_count += 1
# pylint: disable=unused-argument
def _RemoveFoldersExceptionHandler(cls, e):
"""When removing folders, we don't mind if none exist."""
  if (isinstance(e, CommandException) and
'No URLs matched' in e.message) or isinstance(e, NotFoundException):
pass
else:
raise e
def _RemoveFuncWrapper(cls, name_expansion_result, thread_state=None):
cls.RemoveFunc(name_expansion_result, thread_state=thread_state)
class RmCommand(Command):
"""Implementation of gsutil rm command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'rm',
command_name_aliases=['del', 'delete', 'remove'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='afIrR',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='rm',
help_name_aliases=['del', 'delete', 'remove'],
help_type='command_help',
help_one_line_summary='Remove objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the rm command."""
# self.recursion_requested is initialized in command.py (so it can be
# checked in parent class for all commands).
self.continue_on_error = self.parallel_operations
self.read_args_from_stdin = False
self.all_versions = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-I':
self.read_args_from_stdin = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
self.all_versions = True
if self.read_args_from_stdin:
if self.args:
raise CommandException('No arguments allowed with the -I flag.')
url_strs = StdinIterator()
else:
if not self.args:
raise CommandException('The rm command (without -I) expects at '
'least one URL.')
url_strs = self.args
# Tracks if any deletes failed.
self.op_failure_count = 0
# Tracks if any buckets were missing.
self.bucket_not_found_count = 0
bucket_urls_to_delete = []
bucket_strings_to_delete = []
if self.recursion_requested:
bucket_fields = ['id']
for url_str in url_strs:
url = StorageUrlFromString(url_str)
if url.IsBucket() or url.IsProvider():
for blr in self.WildcardIterator(url_str).IterBuckets(
bucket_fields=bucket_fields):
bucket_urls_to_delete.append(blr.storage_url)
bucket_strings_to_delete.append(url_str)
self.preconditions = PreconditionsFromHeaders(self.headers or {})
try:
# Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug, self.logger, self.gsutil_api,
url_strs, self.recursion_requested, project_id=self.project_id,
all_versions=self.all_versions,
continue_on_error=self.continue_on_error or self.parallel_operations)
# Perform remove requests in parallel (-m) mode, if requested, using
# configured number of parallel processes and threads. Otherwise,
# perform requests with sequential function calls in current process.
self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
_RemoveExceptionHandler,
fail_on_error=(not self.continue_on_error),
shared_attrs=['op_failure_count', 'bucket_not_found_count'])
# Assuming the bucket has versioning enabled, url's that don't map to
# objects should throw an error even with all_versions, since the prior
# round of deletes only sends objects to a history table.
# This assumption that rm -a is only called for versioned buckets should be
# corrected, but the fix is non-trivial.
except CommandException as e:
# Don't raise if there are buckets to delete -- it's valid to say:
# gsutil rm -r gs://some_bucket
# if the bucket is empty.
if not bucket_urls_to_delete and not self.continue_on_error:
raise
# Reset the failure count if we failed due to an empty bucket that we're
# going to delete.
msg = 'No URLs matched: '
if msg in str(e):
parts = str(e).split(msg)
if len(parts) == 2 and parts[1] in bucket_strings_to_delete:
ResetFailureCount()
else:
raise
    except ServiceException:
if not self.continue_on_error:
raise
if self.bucket_not_found_count:
raise CommandException('Encountered non-existent bucket during listing')
if self.op_failure_count and not self.continue_on_error:
raise CommandException('Some files could not be removed.')
# If this was a gsutil rm -r command covering any bucket subdirs,
# remove any dir_$folder$ objects (which are created by various web UI
# tools to simulate folders).
if self.recursion_requested:
had_previous_failures = GetFailureCount() > 0
folder_object_wildcards = []
for url_str in url_strs:
<reponame>Guts/feedparser
# Support for the GeoRSS format
# Copyright 2010-2020 <NAME> <<EMAIL>>
# Copyright 2002-2008 <NAME>
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import generator_stop
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
"http://www.w3.org/2003/01/geo/wgs84_pos#": "geo",
"http://www.georss.org/georss": "georss",
"http://www.opengis.net/gml": "gml",
}
def __init__(self):
self.ingeometry = 0
super(Namespace, self).__init__()
def _start_georssgeom(self, attrs_d):
self.push("geometry", 0)
context = self._get_context()
context["where"] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._get_context()
context["where"].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop("geometry"))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop("geometry"))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop("geometry")
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop("geometry"))
if geometry:
self._save_where(geometry)
def _start_where(self, attrs_d):
self.push("where", 0)
context = self._get_context()
context["where"] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrs_d):
srs_name = attrs_d.get("srsname")
try:
srs_dimension = int(attrs_d.get("srsdimension", "2"))
except ValueError:
srs_dimension = 2
context = self._get_context()
context["where"]["srsName"] = srs_name
context["where"]["srsDimension"] = srs_dimension
def _start_gml_point(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.ingeometry = 1
self.push("geometry", 0)
def _start_gml_linestring(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.ingeometry = "linestring"
self.push("geometry", 0)
def _start_gml_polygon(self, attrs_d):
self._parse_srs_attrs(attrs_d)
self.push("geometry", 0)
def _start_gml_exterior(self, attrs_d):
self.push("geometry", 0)
def _start_gml_linearring(self, attrs_d):
self.ingeometry = "polygon"
self.push("geometry", 0)
def _start_gml_pos(self, attrs_d):
self.push("pos", 0)
def _end_gml_pos(self):
this = self.pop("pos")
context = self._get_context()
srs_name = context["where"].get("srsName")
srs_dimension = context["where"].get("srsDimension", 2)
swap = True
if srs_name and "EPSG" in srs_name:
epsg = int(srs_name.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srs_dimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrs_d):
self.push("pos", 0)
def _end_gml_poslist(self):
this = self.pop("pos")
context = self._get_context()
srs_name = context["where"].get("srsName")
srs_dimension = context["where"].get("srsDimension", 2)
swap = True
if srs_name and "EPSG" in srs_name:
epsg = int(srs_name.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(this, self.ingeometry, swap=swap, dims=srs_dimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop("geometry")
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop("where")
_end_georss_where = _end_where
# GeoRSS geometry parsers. Each returns a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == "linestring":
return _parse_georss_line(value, swap, dims)
elif geom_type == "polygon":
ring = _parse_georss_line(value, swap, dims)
return {"type": "Polygon", "coordinates": (ring["coordinates"],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = (float(ll) for ll in value.replace(",", " ").split())
while True:
try:
t = [next(latlons), next(latlons)][:: swap and -1 or 1]
if dims == 3:
t.append(next(latlons))
yield tuple(t)
except StopIteration:
return
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
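    # For example (illustrative), "45.256 -71.92" with the default swap=True
    # yields {'type': 'Point', 'coordinates': (-71.92, 45.256)}: GeoRSS text is
    # lat/lon, while the returned coordinates are (lon, lat).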
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {"type": "Point", "coordinates": coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {"type": "LineString", "coordinates": coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {"type": "Polygon", "coordinates": (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
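    # For example (illustrative), "42.943 -71.032 43.039 -69.856" yields
    # {'type': 'Box', 'coordinates': ((-71.032, 42.943), (-69.856, 43.039))}.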
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {"type": "Box", "coordinates": tuple(coords)}
except (IndexError, ValueError):
return None
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819,
3821,
3824,
3889,
3906,
4001,
4002,
4003,
4004,
4005,
4006,
4007,
4008,
4009,
4010,
4011,
4012,
4013,
4014,
4015,
4016,
4018,
4019,
4020,
4021,
4022,
4023,
4024,
4025,
4027,
4028,
4029,
4030,
4031,
4032,
4033,
4034,
4035,
4036,
4041,
4042,
4043,
4044,
4045,
4046,
4047,
4052,
4053,
4054,
4055,
4075,
4081,
4120,
4121,
4122,
4123,
4124,
4125,
4126,
4127,
4128,
4129,
4130,
4131,
4132,
4133,
4134,
4135,
4136,
4137,
4138,
4139,
4140,
4141,
4142,
4143,
4144,
4145,
4146,
4147,
4148,
4149,
4150,
4151,
4152,
4153,
4154,
4155,
4156,
4157,
4158,
4159,
4160,
4161,
4162,
4163,
4164,
4165,
4166,
4167,
4168,
4169,
4170,
4171,
4172,
4173,
4174,
4175,
4176,
4178,
4179,
4180,
4181,
4182,
4183,
4184,
4185,
4188,
4189,
4190,
4191,
4192,
4193,
4194,
4195,
4196,
4197,
4198,
4199,
4200,
4201,
4202,
4203,
4204,
4205,
4206,
4207,
4208,
4209,
4210,
4211,
4212,
4213,
4214,
4215,
4216,
4218,
4219,
4220,
4221,
4222,
4223,
4224,
4225,
4226,
4227,
4228,
4229,
4230,
4231,
4232,
4233,
4234,
4235,
4236,
4237,
4238,
4239,
4240,
4241,
4242,
4243,
4244,
4245,
4246,
4247,
4248,
4249,
4250,
4251,
4252,
4253,
4254,
4255,
4256,
4257,
4258,
4259,
4260,
4261,
4262,
4263,
4264,
4265,
4266,
4267,
4268,
4269,
4270,
4271,
4272,
4273,
4274,
4275,
4276,
4277,
4278,
4279,
4280,
4281,
4282,
4283,
4284,
4285,
4286,
4287,
4288,
4289,
4291,
4292,
4293,
4294,
4295,
4296,
4297,
4298,
4299,
4300,
4301,
4302,
4303,
4304,
4306,
4307,
4308,
4309,
4310,
4311,
4312,
4313,
4314,
4315,
4316,
4317,
4318,
4319,
4322,
4324,
4326,
4463,
4470,
4475,
4483,
4490,
4555,
4558,
4600,
4601,
4602,
4603,
4604,
4605,
4606,
4607,
4608,
4609,
4610,
4611,
4612,
4613,
4614,
4615,
4616,
4617,
4618,
4619,
4620,
4621,
4622,
4623,
4624,
4625,
4626,
4627,
4628,
4629,
4630,
4631,
4632,
4633,
4634,
4635,
4636,
4637,
4638,
4639,
4640,
4641,
4642,
4643,
4644,
4645,
4646,
4657,
4658,
4659,
4660,
4661,
4662,
4663,
4664,
4665,
4666,
4667,
4668,
4669,
4670,
4671,
4672,
4673,
4674,
4675,
4676,
4677,
4678,
4679,
4680,
4681,
4682,
4683,
4684,
4685,
4686,
4687,
4688,
4689,
4690,
4691,
4692,
4693,
4694,
4695,
4696,
4697,
4698,
4699,
4700,
4701,
4702,
4703,
4704,
4705,
4706,
4707,
4708,
4709,
4710,
4711,
4712,
4713,
4714,
4715,
4716,
4717,
4718,
4719,
4720,
4721,
4722,
4723,
4724,
4725,
4726,
4727,
4728,
4729,
4730,
4731,
4732,
4733,
4734,
4735,
4736,
4737,
4738,
4739,
4740,
4741,
4742,
4743,
4744,
4745,
4746,
4747,
4748,
4749,
4750,
4751,
4752,
4753,
4754,
4755,
4756,
4757,
4758,
4759,
4760,
4761,
4762,
4763,
4764,
4765,
4801,
4802,
4803,
4804,
4805,
4806,
4807,
4808,
4809,
4810,
4811,
4813,
4814,
4815,
4816,
4817,
4818,
4819,
4820,
# Source: mfeed/PySwitchLib, file pyswitchlib/asset.py
import requests
import weakref
import re
import os
import sys
import threading
import xml.etree.ElementTree as ElementTree
import xmltodict
import json
import atexit
import Pyro4
import Pyro4.util
import Pyro4.errors
from distutils.sysconfig import get_python_lib
import time
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
requests.packages.urllib3.disable_warnings(SubjectAltNameWarning)
from pyswitchlib.util.configFile import ConfigFileUtil
import pyswitchlib.exceptions
locals().update(pyswitchlib.exceptions.__dict__)
sys.excepthook = Pyro4.util.excepthook
class Asset(object):
"""
This is an auto-generated class for the PySwitchLib device asset.
Asset provides connection information for PySwitchLib APIs.
"""
def __init__(self, ip_addr='', auth=('<PASSWORD>', 'password'), rest_proto=None, cacert=None, fw_ver='', timeout='', api_port=None):
def on_deletion (killed_ref):
self._cleanup_timer_handle()
self._session.close()
self._response.close()
atexit.register(self._cleanup_timer_handle)
self._weakref = weakref.ref(self, on_deletion)
self._ip_addr = ip_addr
self._auth = auth
self._rest_proto_input = ''
self._rest_protocol = 'http'
self._attempted_rest_protocols = []
self._enabled_rest_protocols = []
self._cacert_input = ''
self._os_type = 'unknown'
self._os_ver = fw_ver
self._os_full_ver = fw_ver
self._default_connection_timeout = 60
self._default_response_timeout = 1800
self._default_session_verify = False
self._session_timeout = (self._default_connection_timeout, self._default_response_timeout)
self._session = requests.Session()
self._response = requests.Response()
self._overall_success = True
self._overall_status = []
self._exc_info = None
self._rest_session_auth_max_retries = 1
self._rest_session_auth_token_expiration = 160
self._rest_session_auth_token_expired = '_EXPIRED_'
self._rest_session_auth_token = self._rest_session_auth_token_expired
self._rest_session_timer_handle = None
self._rest_config_path = '/rest/config/running'
self._rest_operational_path = '/rest/operational-state'
self._rest_rpc_path = '/rest/operational-state'
self._rest_discover_path = '/rest'
self._yang_list = None
self._module_obj = None
self._pyro_ns_port = None
self._pyro_proxy_name = ''
self._pyro_daemon_id = 'default'
self._pyro_bind_max_retries = 30
self._ns_pid_file = os.path.join(os.sep, 'etc', 'pyswitchlib', '.pyswitchlib_ns.pid')
self._pyswitchlib_conf_filename = os.path.join(os.sep, 'etc', 'pyswitchlib', 'pyswitchlib.conf')
self._pyswitchlib_ns_daemon_filename = os.path.join(os.sep, 'etc', 'pyswitchlib', '.pyswitchlib_ns_daemon.uri')
self._pyswitchlib_conf = ConfigFileUtil().read(filename=self._pyswitchlib_conf_filename)
self._pyswitchlib_ns_daemon = ConfigFileUtil().read(filename=self._pyswitchlib_ns_daemon_filename)
for key in self._pyswitchlib_conf:
if 'ns_port' == key:
self._pyro_ns_port = int(self._pyswitchlib_conf[key])
elif 'api_daemon_' in key:
if sys.prefix in self._pyswitchlib_conf[key]:
self._pyro_daemon_id = key
elif 'cacert' == key:
if cacert is None:
cacert = self._pyswitchlib_conf[key]
if api_port:
self._pyro_ns_port = api_port
if os.path.exists(self._ns_pid_file):
self._pyro_proxy_name = 'PYRONAME:PySwitchLib.' + self._pyro_daemon_id
if self._pyro_ns_port:
self._pyro_proxy_name += '@localhost:' + str(self._pyro_ns_port)
else:
if self._pyswitchlib_ns_daemon:
if self._pyro_daemon_id in self._pyswitchlib_ns_daemon:
self._pyro_proxy_name = self._pyswitchlib_ns_daemon[self._pyro_daemon_id]
if rest_proto is not None:
if rest_proto.lower() == 'http' or rest_proto.lower() == 'https' or rest_proto.lower() == 'auto':
self._rest_proto_input = rest_proto.lower()
if self._rest_proto_input == 'http' or self._rest_proto_input == 'https':
self._rest_protocol = self._rest_proto_input
else:
raise RestProtocolTypeError("Rest protocol type must be 'http', 'https', or 'auto'. '" + rest_proto + "' was specified.")
if cacert is not None:
self._cacert_input = cacert
if cacert:
if self._rest_protocol == 'https' or self._rest_proto_input == 'auto':
if os.path.isfile(cacert):
self._default_session_verify = cacert
else:
raise CACertificateNotFoundError("The CA certificate file '" + cacert + "' could not be found.")
else:
self._default_session_verify = False
elif cacert is False:
self._default_session_verify = False
else:
raise CACertificateNotSpecifiedError("The path to the CA certificate file is not specified.")
else:
self._default_session_verify = False
if timeout != '':
self._session_timeout = timeout
self._create_timer_handle()
self._discover_rest_protocol_and_paths()
self._update_fw_version()
self._supported_module_name = self._get_supported_module()
with Pyro4.Proxy(self._pyro_proxy_name) as pyro_proxy:
for n in range(self._pyro_bind_max_retries):
try:
pyro_proxy._pyroBind()
except (Pyro4.errors.NamingError, Pyro4.errors.CommunicationError) as e:
if n == 0:
if self._pyswitchlib_conf and 'ns_port' in self._pyswitchlib_conf:
bound_api_port = int(self._pyswitchlib_conf['ns_port'])
if bound_api_port and self._pyro_ns_port and bound_api_port != self._pyro_ns_port:
raise ExistingApiPortBound("API port: " + str(bound_api_port) + " is already bound.")
pyswitchlib_api_daemon = os.path.join(get_python_lib(), 'pyswitchlib', 'pyswitchlib_api_daemon.py')
pyswitchlib_api_start_string = 'python ' + pyswitchlib_api_daemon + ' start'
if self._pyro_ns_port:
pyswitchlib_api_start_string += ' ' + str(self._pyro_ns_port)
os.system(pyswitchlib_api_start_string)
else:
break
time.sleep(1)
else:
raise ApiDaemonConnectionError("Cannot connect to pyswitchlib_api_daemon.py.")
self._proxied = pyro_proxy
def __getattr__(self, name):
if hasattr(self._proxied, name):
def getattr_wrapper(*args, **kwargs):
self._proxied.api_acquire()
self._proxied.module_name(module_name=self._supported_module_name)
rest_operation_tuple = ()
try:
rest_operation_tuple = getattr(self._proxied, name)(*args, **kwargs)
except Exception as e:
raise e
finally:
self._proxied.api_release()
return self._rest_operation(rest_commands=rest_operation_tuple[0], yang_list=rest_operation_tuple[1], timeout=rest_operation_tuple[2])
return getattr_wrapper
else:
raise AttributeError(name)
def _rest_operation(self, rest_commands=None, yang_list=None, rest_proto=None, cacert=None, timeout=None):
auth = self._auth
auth_retries = 0
index = 0
rest_protocol = None
del self._overall_status[:]
if rest_proto is not None:
rest_protocol = rest_proto
else:
rest_protocol = self._rest_protocol
if cacert is not None:
self._session.verify = cacert
else:
self._session.verify = self._default_session_verify
if isinstance(timeout, basestring):
if timeout == '':
timeout = self._session_timeout
self._cleanup_timer_handle()
self._create_timer_handle()
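# Each rest_cmd is expected to be a list of the form
#   [http_method, uri_suffix, request_data, uri_prefix_type, resource_depth]
# e.g. ["POST", "/show-firmware-version", "", "rpc", 1]; a missing
# uri_prefix_type defaults to "config" below.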
while index < len(rest_commands):
rest_cmd = rest_commands[index]
if len(rest_cmd) < 4:
rest_cmd.append ("config")
if rest_cmd[3] == "config":
uri_prefix_path = self._rest_config_path
elif rest_cmd[3] == "operational":
uri_prefix_path = self._rest_operational_path
elif rest_cmd[3] == "rpc":
uri_prefix_path = self._rest_rpc_path
elif rest_cmd[3] == "discover":
uri_prefix_path = self._rest_discover_path
header = {"Resource-Depth" : str(rest_cmd[4])}
url = rest_protocol+"://"+self._ip_addr+uri_prefix_path
self._session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
if self._rest_session_auth_token != self._rest_session_auth_token_expired:
self._session.headers.update({'Authentication-Token': self._rest_session_auth_token})
else:
if 'Authentication-Token' in self._session.headers:
self._session.headers.pop('Authentication-Token')
if rest_cmd[0] == "GET":
if self._rest_session_auth_token == self._rest_session_auth_token_expired:
self._response = self._session.get(url + rest_cmd[1], headers=header, auth=auth, timeout=timeout)
else:
self._response = self._session.get(url + rest_cmd[1], headers=header, timeout=timeout)
elif rest_cmd[0] == "POST":
if self._rest_session_auth_token == self._rest_session_auth_token_expired:
self._response = self._session.post(url + rest_cmd[1], auth=auth, data=rest_cmd[2], timeout=timeout)
else:
self._response = self._session.post(url + rest_cmd[1], data=rest_cmd[2], timeout=timeout)
elif rest_cmd[0] == "PUT":
if self._rest_session_auth_token == self._rest_session_auth_token_expired:
self._response = self._session.put(url + rest_cmd[1], auth=auth, data=rest_cmd[2], timeout=timeout)
else:
self._response = self._session.put(url + rest_cmd[1], data=rest_cmd[2], timeout=timeout)
elif rest_cmd[0] == "PATCH":
if self._rest_session_auth_token == self._rest_session_auth_token_expired:
self._response = self._session.patch(url + rest_cmd[1], auth=auth, data=rest_cmd[2], timeout=timeout)
else:
self._response = self._session.patch(url + rest_cmd[1], data=rest_cmd[2], timeout=timeout)
elif rest_cmd[0] == "DELETE":
if self._rest_session_auth_token == self._rest_session_auth_token_expired:
self._response = self._session.delete(url + rest_cmd[1], auth=auth, timeout=timeout)
else:
self._response = self._session.delete(url + rest_cmd[1], timeout=timeout)
if 'Authentication-Token' in self._response.headers:
self._rest_session_auth_token = self._response.headers['Authentication-Token']
json_output = json.loads('{"output": ""}')
text_response = self._response.text
if self._response.status_code >= 200 and self._response.status_code <= 299:
if re.match('^<', self._response.text):
if rest_cmd[3] != "rpc":
text_response = '<output>\r\n' + self._response.text + '</output>\r\n'
json_output = json.loads(self._xml_to_json(text_response))
else:
self._auth_token_expiration()
if self._response.status_code == 401 and auth_retries < self._rest_session_auth_max_retries:
auth_retries += 1
continue
if re.match('^<', self._response.text):
if re.match('^<output', self._response.text):
json_output = json.loads(self._xml_to_json(text_response))
else:
json_output = json.loads('{"output": ' + self._xml_to_json(text_response) + '}')
else:
json_output = json.loads('{"output": ' + json.dumps(str(self._response.text)) + '}')
if yang_list:
self._format_dict_output(container=json_output, keys=yang_list)
self._overall_status.append({self._ip_addr : {'request': {'op_code': rest_cmd[0], 'uri': rest_cmd[1], 'data': rest_cmd[2]}, 'response': {'status_code': self._response.status_code, 'url': self._response.url, 'text': self._response.text, 'json': json_output}}})
index += 1
if not self._rest_session_timer_handle.is_alive():
self._rest_session_timer_handle.start()
return self._get_results()
def _get_results(self):
self._overall_success = True
if self._overall_status:
for status in self._overall_status:
for key in status:
if (status[key]['response']['status_code'] < 200) or (status[key]['response']['status_code'] > 299):
self._overall_success = False
else:
self._overall_success = False
return self._overall_success, self._overall_status
def _discover_rest_protocol_and_paths(self):
status, result = self._do_rest_protocol_discovery(self._rest_proto_input)
if status == False:
self._raise_rest_validation_exception(result)
self._update_uri_prefix_paths(result)
def _update_fw_version(self):
rest_command = (
["POST", "/show-firmware-version", "", "rpc", 1],
)
self._rest_operation(rest_command, timeout=(self._default_connection_timeout, self._default_connection_timeout*2))
status, result = self._get_results()
if status == False:
self._raise_rest_validation_exception(result)
try:
rest_root = ElementTree.fromstring(re.sub(' xmlns[^ \t\n\r\f\v>]+', '', result[0][self._ip_addr]['response']['text']))
if rest_root.find('show-firmware-version').find('os-name') is not None:
if 'Network Operating System' in rest_root.find('show-firmware-version').find('os-name').text:
self._os_type = 'nos'
elif 'SLX' in rest_root.find('show-firmware-version').find('os-name').text:
self._os_type = 'slxos'
if 'Server' in self._response.headers:
if 'NOS' in self._response.headers['Server']:
self._os_type = 'nos'
elif 'SLX' in self._response.headers['Server']:
self._os_type = 'slxos'
if rest_root.find('show-firmware-version').find('firmware-full-version') is not None:
self._os_full_ver = rest_root.find('show-firmware-version').find('firmware-full-version').text
if rest_root.find('show-firmware-version').find('os-version') is not None:
self._os_ver = rest_root.find('show-firmware-version').find('os-version').text
if self._os_type == 'slxos':
slxos_ver = self._os_ver.split('.')
if len(slxos_ver) >= 2:
slxos_pattern_string = '^({0}[rs]{{1}})\.{1}\.'.format(slxos_ver[0], slxos_ver[1])
elif len(slxos_ver) == 1:
slxos_pattern_string = '^({0}[rs]{{1}})\.'.format(slxos_ver[0])
else:
slxos_pattern_string = '^(\d+[rs]{1})\.'
slxos_pattern = re.compile(slxos_pattern_string)
match = slxos_pattern.match(self._os_full_ver)
if match:
slxos_ver[0] = match.group(1)
self._os_ver = '.'.join(slxos_ver)
except:
pass
def _do_rest_protocol_discovery(self, rest_proto_input):
rest_command = (
["GET", "", "", "discover", 1],
)
overall_status = None
overall_result = None
if rest_proto_input == 'auto':
self._attempted_rest_protocols.append('http')
try:
self._rest_operation(rest_command, rest_proto='http', timeout=(self._default_connection_timeout, self._default_connection_timeout*2))
except:
pass
finally:
overall_status, overall_result = self._get_results()
if (overall_status == True):
self._enabled_rest_protocols.append('http')
self._attempted_rest_protocols.append('https')
try:
self._rest_operation(rest_command, rest_proto='https', cacert=False, timeout=(self._default_connection_timeout, self._default_connection_timeout*2))
except:
pass
finally:
status, result = self._get_results()
if (status == True):
self._session.close()
self._session = requests.Session()
self._session.verify = self._default_session_verify
self._enabled_rest_protocols.append('https')
self._rest_protocol = 'https'
overall_status = status
overall_result = result
else:
self._attempted_rest_protocols.append(self._rest_protocol)
try:
self._rest_operation(rest_command, timeout=(self._default_connection_timeout, self._default_connection_timeout*2))
except:
self._exc_info = sys.exc_info()
finally:
overall_status, overall_result = self._get_results()
if (overall_status == True):
self._enabled_rest_protocols.append(self._rest_protocol)
return overall_status, overall_result
def _update_uri_prefix_paths(self, result):
try:
rest_root = ElementTree.fromstring(re.sub(' xmlns[^ \t\n\r\f\v>]+|y:', '', result[0][self._ip_addr]['response']['text']))
if rest_root.find('config').find('running') is not None:
self._rest_config_path = rest_root.find('config').find('running').get('self')
if rest_root.find('operational-state') is not None:
self._rest_rpc_path = rest_root.find('operational-state').get('self')
self._rest_operational_path = rest_root.find('operational-state').get('self')
if rest_root.find('operations') is not None:
self._rest_rpc_path = rest_root.find('operations').get('self')
except:
pass
def _raise_rest_validation_exception(self, result):
if result:
if result[0][self._ip_addr]['response']['status_code'] == 401:
raise InvalidAuthenticationCredentialsError('Status Code: ' + str(result[0][self._ip_addr]['response']['status_code']) + ', Error: Invalid Authentication Credentials.')
elif result[0][self._ip_addr]['response']['status_code'] == 404:
raise RestInterfaceError('Status Code: ' + str(result[0][self._ip_addr]['response']['status_code']) + ', Error: Not Found.')
else:
try:
if self._exc_info:
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
except Exception as e:
raise RestInterfaceError('Could not establish a connection to ' + self._ip_addr + ' using ' + str(self._attempted_rest_protocols) + '. Reason: ' + str(e))
def _update_max_keep_alive_requests(self, max_requests=0):
return self.run_command(command="unhide foscmd;fibranne;foscmd sed \\'s/MaxKeepAliveRequests [0-9]*/MaxKeepAliveRequests " + str(max_requests) + "/\\' /fabos/webtools/bin/httpd.conf > /fabos/webtools/bin/httpd.conf.temp&&mv /fabos/webtools/bin/httpd.conf.temp /fabos/webtools/bin/httpd.conf&&/usr/apache/bin/apachectl -k restart &")
def _xml_to_json(self, xml=''):
return json.dumps(xmltodict.parse(xml))
def _get_supported_module(self):
pybind_dir = ''
site_dir = sys.path
for site_path in site_dir:
pybind_dir = os.path.join(site_path, 'pybind')
if
- 2"""
self.create_clean_ou("OU=ou1," + self.base_dn)
mod = "(A;CI;LC;;;%s)(A;CI;LC;;;%s)" % (str(self.user_sid), str(self.group_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
tmp_desc = security.descriptor.from_sddl("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" + mod,
self.domain_sid)
self.ldb_admin.create_ou("OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_admin.create_ou("OU=ou3,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_admin.create_ou("OU=ou4,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_admin.create_ou("OU=ou5,OU=ou3,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_admin.create_ou("OU=ou6,OU=ou4,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
print "Testing correct behavior on nonaccessible search base"
try:
self.ldb_user3.search("OU=ou3,OU=ou2,OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_BASE)
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_OBJECT)
else:
self.fail()
mod = "(D;;LC;;;%s)(D;;LC;;;%s)" % (str(self.user_sid), str(self.group_sid))
self.sd_utils.dacl_add_ace("OU=ou2,OU=ou1," + self.base_dn, mod)
ok_list = [Dn(self.ldb_admin, "OU=ou2,OU=ou1," + self.base_dn),
Dn(self.ldb_admin, "OU=ou1," + self.base_dn)]
res = self.ldb_user3.search("OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
res_list = [ x["dn"] for x in res if x["dn"] in ok_list ]
self.assertEquals(sorted(res_list), sorted(ok_list))
ok_list = [Dn(self.ldb_admin, "OU=ou2,OU=ou1," + self.base_dn),
Dn(self.ldb_admin, "OU=ou1," + self.base_dn),
Dn(self.ldb_admin, "OU=ou5,OU=ou3,OU=ou2,OU=ou1," + self.base_dn),
Dn(self.ldb_admin, "OU=ou6,OU=ou4,OU=ou2,OU=ou1," + self.base_dn)]
#should not see ou3 and ou4, but should see ou5 and ou6
res = self.ldb_user.search("OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 4)
res_list = [ x["dn"] for x in res if x["dn"] in ok_list ]
self.assertEquals(sorted(res_list), sorted(ok_list))
res = self.ldb_user2.search("OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 4)
res_list = [ x["dn"] for x in res if x["dn"] in ok_list ]
self.assertEquals(sorted(res_list), sorted(ok_list))
def test_search4(self):
"""There is no difference in visibility if the user is also creator"""
self.create_clean_ou("OU=ou1," + self.base_dn)
mod = "(A;CI;CC;;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
tmp_desc = security.descriptor.from_sddl("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" + mod,
self.domain_sid)
self.ldb_user.create_ou("OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_user.create_ou("OU=ou3,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_user.create_ou("OU=ou4,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_user.create_ou("OU=ou5,OU=ou3,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_user.create_ou("OU=ou6,OU=ou4,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
ok_list = [Dn(self.ldb_admin, "OU=ou2,OU=ou1," + self.base_dn),
Dn(self.ldb_admin, "OU=ou1," + self.base_dn)]
res = self.ldb_user3.search("OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 2)
res_list = [ x["dn"] for x in res if x["dn"] in ok_list ]
self.assertEquals(sorted(res_list), sorted(ok_list))
res = self.ldb_user.search("OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 2)
res_list = [ x["dn"] for x in res if x["dn"] in ok_list ]
self.assertEquals(sorted(res_list), sorted(ok_list))
def test_search5(self):
"""Make sure users can see only attributes they are allowed to see"""
self.create_clean_ou("OU=ou1," + self.base_dn)
mod = "(A;CI;LC;;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
tmp_desc = security.descriptor.from_sddl("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" + mod,
self.domain_sid)
self.ldb_admin.create_ou("OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
# assert user can only see dn
res = self.ldb_user.search("OU=ou2,OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
ok_list = ['dn']
self.assertEquals(len(res), 1)
res_list = res[0].keys()
self.assertEquals(res_list, ok_list)
res = self.ldb_user.search("OU=ou2,OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_BASE, attrs=["ou"])
self.assertEquals(len(res), 1)
res_list = res[0].keys()
self.assertEquals(res_list, ok_list)
#give read property on ou and assert user can only see dn and ou
mod = "(OA;;RP;bf9679f0-0de6-11d0-a285-00aa003049e2;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
self.sd_utils.dacl_add_ace("OU=ou2,OU=ou1," + self.base_dn, mod)
res = self.ldb_user.search("OU=ou2,OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
ok_list = ['dn', 'ou']
self.assertEquals(len(res), 1)
res_list = res[0].keys()
self.assertEquals(sorted(res_list), sorted(ok_list))
#give read property on Public Information and assert user can see ou and other members
mod = "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
self.sd_utils.dacl_add_ace("OU=ou2,OU=ou1," + self.base_dn, mod)
res = self.ldb_user.search("OU=ou2,OU=ou1," + self.base_dn, expression="(objectClass=*)",
scope=SCOPE_SUBTREE)
ok_list = ['dn', 'objectClass', 'ou', 'distinguishedName', 'name', 'objectGUID', 'objectCategory']
res_list = res[0].keys()
self.assertEquals(sorted(res_list), sorted(ok_list))
def test_search6(self):
"""If an attribute that cannot be read is used in a filter, it is as if the attribute does not exist"""
self.create_clean_ou("OU=ou1," + self.base_dn)
mod = "(A;CI;LCCC;;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou1," + self.base_dn, mod)
tmp_desc = security.descriptor.from_sddl("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" + mod,
self.domain_sid)
self.ldb_admin.create_ou("OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
self.ldb_user.create_ou("OU=ou3,OU=ou2,OU=ou1," + self.base_dn, sd=tmp_desc)
res = self.ldb_user.search("OU=ou1," + self.base_dn, expression="(ou=ou3)",
scope=SCOPE_SUBTREE)
#nothing should be returned as ou is not accessible
self.assertEquals(len(res), 0)
#give read property on ou and assert user can only see dn and ou
mod = "(OA;;RP;bf9679f0-0de6-11d0-a285-00aa003049e2;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou3,OU=ou2,OU=ou1," + self.base_dn, mod)
res = self.ldb_user.search("OU=ou1," + self.base_dn, expression="(ou=ou3)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 1)
ok_list = ['dn', 'ou']
res_list = res[0].keys()
self.assertEquals(sorted(res_list), sorted(ok_list))
#give read property on Public Information and assert user can see ou and other members
mod = "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;%s)" % (str(self.user_sid))
self.sd_utils.dacl_add_ace("OU=ou2,OU=ou1," + self.base_dn, mod)
res = self.ldb_user.search("OU=ou1," + self.base_dn, expression="(ou=ou2)",
scope=SCOPE_SUBTREE)
self.assertEquals(len(res), 1)
ok_list = ['dn', 'objectClass', 'ou', 'distinguishedName', 'name', 'objectGUID', 'objectCategory']
res_list = res[0].keys()
self.assertEquals(sorted(res_list), sorted(ok_list))
#tests on ldap delete operations
class AclDeleteTests(AclTests):
def setUp(self):
super(AclDeleteTests, self).setUp()
self.regular_user = "acl_delete_user1"
# Create regular user
self.ldb_admin.newuser(self.regular_user, self.user_pass)
self.ldb_user = self.get_ldb_connection(self.regular_user, self.user_pass)
def tearDown(self):
super(AclDeleteTests, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn("test_delete_user1"))
delete_force(self.ldb_admin, self.get_user_dn(self.regular_user))
delete_force(self.ldb_admin, self.get_user_dn("test_anonymous"))
def test_delete_u1(self):
"""User is prohibited by default to delete another User object"""
# Create user that we try to delete
self.ldb_admin.newuser("test_delete_user1", self.user_pass)
# Deleting the User object here should ALWAYS throw an exception
try:
self.ldb_user.delete(self.get_user_dn("test_delete_user1"))
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
else:
self.fail()
def test_delete_u2(self):
"""User's group has RIGHT_DELETE to another User object"""
user_dn = self.get_user_dn("test_delete_user1")
# Create user that we try to delete
self.ldb_admin.newuser("test_delete_user1", self.user_pass)
mod = "(A;;SD;;;AU)"
self.sd_utils.dacl_add_ace(user_dn, mod)
# Try to delete User object
self.ldb_user.delete(user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
def test_delete_u3(self):
"""User indentified by SID has RIGHT_DELETE to another User object"""
user_dn = self.get_user_dn("test_delete_user1")
# Create user that we try to delete
self.ldb_admin.newuser("test_delete_user1", self.user_pass)
mod = "(A;;SD;;;%s)" % self.sd_utils.get_object_sid(self.get_user_dn(self.regular_user))
self.sd_utils.dacl_add_ace(user_dn, mod)
# Try to delete User object
self.ldb_user.delete(user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
def test_delete_anonymous(self):
"""Test add operation with anonymous user"""
anonymous = SamDB(url=ldaphost, credentials=self.creds_tmp, lp=lp)
self.ldb_admin.newuser("test_anonymous", "samba123@")
try:
anonymous.delete(self.get_user_dn("test_anonymous"))
except LdbError, (num, _):
self.assertEquals(num, ERR_OPERATIONS_ERROR)
else:
self.fail()
#tests on ldap rename operations
class AclRenameTests(AclTests):
def setUp(self):
super(AclRenameTests, self).setUp()
self.regular_user = "acl_rename_user1"
self.ou1 = "OU=test_rename_ou1"
self.ou2 = "OU=test_rename_ou2"
self.ou3 = "OU=test_rename_ou3,%s" % self.ou2
self.testuser1 = "test_rename_user1"
self.testuser2 = "test_rename_user2"
self.testuser3 = "test_rename_user3"
self.testuser4 = "test_rename_user4"
self.testuser5 = "test_rename_user5"
# Create regular user
self.ldb_admin.newuser(self.regular_user, self.user_pass)
self.ldb_user = self.get_ldb_connection(self.regular_user, self.user_pass)
def tearDown(self):
super(AclRenameTests, self).tearDown()
# Clean up objects created under OU3
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser1, self.ou3, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser2, self.ou3, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser5, self.ou3, self.base_dn))
delete_force(self.ldb_admin, "%s,%s" % (self.ou3, self.base_dn))
# Clean up objects created under OU2
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser1, self.ou2, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser2, self.ou2, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser5, self.ou2, self.base_dn))
delete_force(self.ldb_admin, "%s,%s" % (self.ou2, self.base_dn))
# Clean up objects created under OU1
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser1, self.ou1, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser2, self.ou1, self.base_dn))
delete_force(self.ldb_admin, "CN=%s,%s,%s" % (self.testuser5, self.ou1, self.base_dn))
delete_force(self.ldb_admin, "OU=test_rename_ou3,%s,%s" % (self.ou1, self.base_dn))
delete_force(self.ldb_admin, "%s,%s" % (self.ou1, self.base_dn))
delete_force(self.ldb_admin, self.get_user_dn(self.regular_user))
def test_rename_u1(self):
"""Regular user fails to rename 'User object' within single OU"""
# Create OU structure
self.ldb_admin.create_ou("OU=test_rename_ou1," + self.base_dn)
self.ldb_admin.newuser(self.testuser1, self.user_pass, userou=self.ou1)
try:
self.ldb_user.rename("CN=%s,%s,%s" % (self.testuser1, self.ou1, self.base_dn), \
"CN=%s,%s,%s" % (self.testuser5, self.ou1, self.base_dn))
except LdbError, (num, _):
self.assertEquals(num, ERR_INSUFFICIENT_ACCESS_RIGHTS)
else:
self.fail()
def test_rename_u2(self):
"""Grant WRITE_PROPERTY to AU so regular user can rename 'User object' within single OU"""
ou_dn = "OU=test_rename_ou1," + self.base_dn
user_dn = "CN=test_rename_user1," + ou_dn
rename_user_dn = "CN=test_rename_user5," + ou_dn
# Create OU structure
self.ldb_admin.create_ou(ou_dn)
self.ldb_admin.newuser(self.testuser1, self.user_pass, userou=self.ou1)
mod = "(A;;WP;;;AU)"
self.sd_utils.dacl_add_ace(user_dn, mod)
# Rename 'User object' having WP to AU
self.ldb_user.rename(user_dn, rename_user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % rename_user_dn)
self.assertNotEqual(len(res), 0)
def test_rename_u3(self):
"""Test rename with rights granted to 'User object' SID"""
ou_dn = "OU=test_rename_ou1," + self.base_dn
user_dn = "CN=test_rename_user1," + ou_dn
rename_user_dn = "CN=test_rename_user5," + ou_dn
# Create OU structure
self.ldb_admin.create_ou(ou_dn)
self.ldb_admin.newuser(self.testuser1, self.user_pass, userou=self.ou1)
sid = self.sd_utils.get_object_sid(self.get_user_dn(self.regular_user))
mod = "(A;;WP;;;%s)" % str(sid)
self.sd_utils.dacl_add_ace(user_dn, mod)
# Rename 'User object' with WP granted to the user's SID
self.ldb_user.rename(user_dn, rename_user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % rename_user_dn)
self.assertNotEqual(len(res), 0)
def test_rename_u4(self):
"""Rename 'User object' cross OU with WP, SD and CC right granted on reg. user to AU"""
ou1_dn = "OU=test_rename_ou1," + self.base_dn
ou2_dn = "OU=test_rename_ou2," + self.base_dn
user_dn = "CN=test_rename_user2," + ou1_dn
rename_user_dn = "CN=test_rename_user5," + ou2_dn
# Create OU structure
self.ldb_admin.create_ou(ou1_dn)
self.ldb_admin.create_ou(ou2_dn)
self.ldb_admin.newuser(self.testuser2, self.user_pass, userou=self.ou1)
mod = "(A;;WPSD;;;AU)"
self.sd_utils.dacl_add_ace(user_dn, mod)
mod = "(A;;CC;;;AU)"
self.sd_utils.dacl_add_ace(ou2_dn, mod)
# Rename 'User object' with WP and SD on the user and CC on the target OU, granted to AU
self.ldb_user.rename(user_dn, rename_user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % rename_user_dn)
self.assertNotEqual(len(res), 0)
def test_rename_u5(self):
"""Test rename with rights granted to 'User object' SID"""
ou1_dn = "OU=test_rename_ou1," + self.base_dn
ou2_dn = "OU=test_rename_ou2," + self.base_dn
user_dn = "CN=test_rename_user2," + ou1_dn
rename_user_dn = "CN=test_rename_user5," + ou2_dn
# Create OU structure
self.ldb_admin.create_ou(ou1_dn)
self.ldb_admin.create_ou(ou2_dn)
self.ldb_admin.newuser(self.testuser2, self.user_pass, userou=self.ou1)
sid = self.sd_utils.get_object_sid(self.get_user_dn(self.regular_user))
mod = "(A;;WPSD;;;%s)" % str(sid)
self.sd_utils.dacl_add_ace(user_dn, mod)
mod = "(A;;CC;;;%s)" % str(sid)
self.sd_utils.dacl_add_ace(ou2_dn, mod)
# Rename 'User object' with WP and SD on the user and CC on the target OU, granted to the user's SID
self.ldb_user.rename(user_dn, rename_user_dn)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % user_dn)
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % rename_user_dn)
self.assertNotEqual(len(res), 0)
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
"""
"""
import yaml
from . import util
# The tree of c and fortran statements.
cf_tree = {}
fc_dict = {} # dictionary of Scope of all expanded fc_statements.
default_scopes = dict()
def lookup_c_statements(arg):
"""Look up the c_statements for an argument.
If the argument type is a template, look for
template specialization.
Args:
arg -
"""
arg_typemap = arg.typemap
specialize = []
if arg.template_arguments:
arg_typemap = arg.template_arguments[0].typemap
specialize.append(arg_typemap.sgroup)
return arg_typemap, specialize
def lookup_fc_stmts(path):
return lookup_stmts_tree(cf_tree, path)
def compute_name(path, char="_"):
"""
Compute a name from a list of components.
Blank entries are filtered out.
Used to find C_error_pattern.
Args:
path - list of name components.
"""
work = [ part for part in path if part ] # skip empty components
return char.join(work)
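# Example: compute_name(["c", "", "in", "native"]) returns "c_in_native".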
def lookup_local_stmts(path, parent, node):
"""Look in node.fstatements for additional statements.
XXX - Only used with result.
mode - "update", "replace"
Args:
path - list of path components ["c", "buf"]
parent - parent Scope.
node - FunctionNode.
"""
name = compute_name(path)
blk = node.fstatements.get(name, None)
if blk:
mode = blk.get("mode", "update")
if mode == "update":
blk.reparent(parent)
return blk
return parent
def assign_buf_variable_names(attrs, meta, options, fmt, rootname):
"""
Transfer names from attribute to fmt.
"""
# XXX - make sure they don't conflict with other names.
if meta["capsule"]:
fmt.c_var_capsule = options.C_var_capsule_template.format(
c_var=rootname)
if attrs["cdesc"]:
# XXX - c_var_cdesc is set via Stmts.temps=["cdesc"]
# XXX not sure if this is needed still.
fmt.c_var_cdesc2 = options.C_var_context_template.format(
c_var=rootname)
def compute_return_prefix(arg, local_var):
"""Compute how to access variable: dereference, address, as-is"""
if local_var == "scalar":
if arg.is_pointer():
return "&"
else:
return ""
elif local_var == "pointer":
if arg.is_pointer():
return ""
else:
return "*"
elif local_var == "funcptr":
return ""
elif arg.is_reference():
# Convert a return reference into a pointer.
return "&"
else:
return ""
def update_statements_for_language(language):
"""Preprocess statements for lookup.
Update statements for c or c++.
Fill in cf_tree.
Parameters
----------
language : str
"c" or "c++"
"""
update_for_language(fc_statements, language)
update_stmt_tree(fc_statements, fc_dict, cf_tree, default_stmts)
def update_for_language(stmts, lang):
"""
Move language specific entries to current language.
stmts=[
dict(
name='foo_bar',
c_declare=[],
cxx_declare=[],
),
...
]
For lang==c,
foo_bar["declare"] = foo_bar["c_declare"]
"""
for item in stmts:
for clause in [
"impl_header",
"cxx_local_var",
"declare",
"post_parse",
"pre_call",
"post_call",
"cleanup",
"fail",
]:
specific = lang + "_" + clause
if specific in item:
# XXX - maybe make sure clause does not already exist.
item[clause] = item[specific]
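# Illustrative effect: with lang == "c", an entry containing both c_declare
# and cxx_declare keys gets item["declare"] = item["c_declare"]; the
# cxx_declare entry is simply left in place and ignored for this language.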
def compute_stmt_permutations(out, parts):
"""Expand parts which have multiple values
Ex: parts =
[['c'], ['in', 'out', 'inout'], ['native'], ['*'], ['cfi']]
Three entries will be appended to out:
['c', 'in', 'native', '*', 'cfi']
['c', 'out', 'native', '*', 'cfi']
['c', 'inout', 'native', '*', 'cfi']
Parameters
----------
out : list
Results are appended to the list.
parts :
"""
tmp = []
for i, part in enumerate(parts):
if isinstance(part, list):
if len(part) == 1:
tmp.append(part[0])
else:
for expand in part:
compute_stmt_permutations(
out, tmp + [expand] + parts[i+1:])
break
else:
tmp.append(part)
else:
out.append(tmp)
def add_statement_to_tree(tree, nodes, node_stmts, node, steps):
"""Add node to tree.
Parameters
----------
tree : dict
The accumulated tree.
nodes : dict
Scopes indexed by name to implement 'base'.
node_stmts : dict
nodes indexed by name to implement 'mixin'.
node : dict
A 'statements' dict from fc_statement to add.
steps : list of str
['c', 'native', '*', 'in', 'cfi']
"""
step = tree
label = []
for part in steps:
step = step.setdefault(part, {})
label.append(part)
step["_key"] = "_".join(label)
if "base" in node:
step['_node'] = node
scope = util.Scope(nodes[node["base"]])
else:
step['_node'] = node
scope = util.Scope(default_scopes[steps[0]])
if "mixin" in node:
for mpart in node["mixin"]:
scope.update(node_stmts[mpart])
scope.update(node)
step["_stmts"] = scope
name = step["_key"]
# Name scope using variant name (ex in/out/inout).
scope.name = name
nodes[name] = scope
node_stmts[name] = node
return scope
def update_stmt_tree(stmts, nodes, tree, defaults):
"""Update tree by adding stmts. Each key in stmts is split by
underscore then inserted into tree to form nested dictionaries to
the values from stmts. The end key is named _node, since it is
impossible to have an intermediate element with that name (since
they're split on underscore).
Implement "base" field. Base must be defined before use.
Add "_key" to tree to aid debugging.
Each typemap is converted into a Scope instance with the parent
based on the language (c or f) and added as "scope" field.
This additional layer of indirection is needed to implement base.
stmts = [
{name="c_in_native",} # value1
{name="c_out_native",} # value2
{name="c_out_native_pointer",} # value3
{name="c_in_string",} # value4
]
tree = {
"c": {
"in": {
"native": {"_node":value1},
"string": {"_node":value4},
},
"out": {
"native":{"_node":value2},
"pointer":{
"out":{"_node":value3},
},
},
},
}
Parameters
----------
stmts : dict
nodes : dict
Created Scope members for 'base'.
tree : dict
defaults: dict
"""
# Convert defaults into Scope nodes.
for key, node in defaults.items():
default_scopes[key] = node
# Index by name to find alias, base, mixin.
node_stmts = {} # Dict from fc_statements for 'mixin'.
nodes.clear() # Allow function to be called multiple times.
for node in stmts:
# node is a dict.
if "name" not in node:
raise RuntimeError("Missing name in statements: {}".
format(str(node)))
for node in stmts:
key = node["name"]
steps = key.split("_")
substeps = []
for part in steps:
subparts = part.split("/")
substeps.append(subparts)
expanded = []
compute_stmt_permutations(expanded, substeps)
for namelst in expanded:
name = "_".join(namelst)
if name in nodes:
raise RuntimeError("Duplicate key in statements: {}".
format(name))
stmt = add_statement_to_tree(tree, nodes, node_stmts, node, namelst)
stmt.intent = namelst[1]
# check for consistency
if key[0] == "c":
if (stmt.c_arg_decl is not None or
stmt.f_arg_decl is not None or
stmt.f_c_arg_names is not None):
err = False
if stmt.c_arg_decl is None:
err = True
print("Missing c_arg_decl in", node["name"])
if stmt.f_arg_decl is None:
err = True
print("Missing f_arg_decl in", node["name"])
if stmt.f_c_arg_names is None:
err = True
print("Missing f_c_arg_names in", node["name"])
if err:
raise RuntimeError(
"c_arg_decl, f_arg_decl and f_c_arg_names must all exist")
length = len(stmt.c_arg_decl)
if any(len(lst) != length for lst in [stmt.f_arg_decl, stmt.f_c_arg_names]):
raise RuntimeError(
"c_arg_decl, f_arg_decl and f_c_arg_names "
"must all be same length in {}".format(node["name"]))
def write_cf_tree(fp):
"""Write out statements tree.
Parameters
----------
fp : file
"""
lines = []
print_tree_index(cf_tree, lines)
fp.writelines(lines)
print_tree_statements(fp, fc_dict, default_stmts)
def print_tree_index(tree, lines, indent=""):
"""Print statements search tree index.
Intermediate nodes are prefixed with --.
Useful for debugging.
Parameters
----------
fp : file
lines : list
list of output lines
indent : str
indention for recursion.
"""
parts = tree.get('_key', 'root').split('_')
if "_node" in tree:
# final = '' # + tree["_node"]["scope"].name + '-'
origname = tree["_node"]["name"]
lines.append("{}{} -- {}\n".format(indent, parts[-1], origname))
else:
lines.append("{}{}\n".format(indent, parts[-1]))
indent += ' '
for key in sorted(tree.keys()):
if key == '_node':
continue
if key == 'scope':
continue
if key == '_key':
continue
value = tree[key]
if isinstance(value, dict):
print_tree_index(value, lines, indent)
def print_tree_statements(fp, statements, defaults):
"""Print expanded statements.
Statements may not have all values directly defined since 'base'
and 'mixin' brings in other values. This will dump the values as
used by Shroud.
Parameters
----------
fp : file
statements : dict
defaults : dict
"""
# Convert Scope into a dictionary for YAML.
# Add all non-null values from the default dict.
yaml.SafeDumper.ignore_aliases = lambda *args : True
complete = {}
for name in sorted(statements.keys()):
root = name.split("_", 1)[0]
base = defaults[root]
value = statements[name]
all = {}
for key in base.__dict__.keys():
if key[0] == "_":
continue
if value[key]:
all[key] = value[key]
complete[name] = all
yaml.safe_dump(complete, fp)
def lookup_stmts_tree(tree, path):
"""
Lookup path in statements tree.
Look for longest path which matches.
Used to find specific cases first, then fall back to general.
ex path = ['result', 'allocatable']
Finds 'result_allocatable' if it exists, else 'result'.
If not found, return an empty dictionary.
path typically consists of:
in, out, inout, result
generated_clause - buf
deref - allocatable
Args:
tree - dictionary of nested dictionaries
path - list of name components.
Blank entries are filtered out.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import json
import copy
from enum import Enum
from vulkan_header_parser.vulkan_class_name import vulkan_class_name
from vulkan_header_parser.to_extension_name_def import to_extension_name_def
e_rule = re.compile( '^e(.*)' )
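# e_rule strips the leading "e" that Vulkan-Hpp prepends to enumerator names,
# so that, e.g., "eFifo" can also be matched and emitted as "Fifo" in the
# JSON lookup tables built below.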
class vulkan_enum:
def __init__( self, name_, ext_, defs_ ):
self.name = vulkan_class_name( name_ )
self.ext = ext_
self.ext_def = to_extension_name_def( self.ext )
self.static_defs = copy.deepcopy( defs_ )
self.from_table = []
self.to_table = []
self.conditional = len( self.static_defs ) != 0
def add( self, name_, cname_, defs_ ):
xor_defs = {}
for d in defs_.keys():
if not d in self.static_defs:
xor_defs[ d ] = defs_[ d ]
e_match = re.match( e_rule, name_ )
if e_match:
self.from_table.append(
( e_match.group( 1 ), name_, xor_defs )
)
self.from_table.append(
( name_, name_, xor_defs )
)
self.from_table.append(
( cname_, name_, xor_defs )
)
self.to_table.append(
( name_, e_match.group( 1 ), xor_defs )
)
if len( xor_defs ):
self.conditional = True
def __str__( self ):
return json.dumps(
{
"ext_suffix" : self.name.ext_suffix,
"version_suffix" : self.name.version_suffix,
"name" : self.name.get_enum(),
"ext" : self.ext,
"ext_def" : self.ext_def,
"from_table": [ [ v[ 0 ], v[ 1 ] ] for v in self.from_table ],
"to_table": [ [ v[ 0 ], v[ 1 ] ] for v in self.to_table ]
},
indent=2
)
def generate_impl( self ):
name = self.name.get_name()
cname = self.name.get_cname()
inline = ""
if self.conditional:
inline = "inline "
m = ""
if len( self.static_defs ):
m += '#if ' + ' && '.join( [ x for x in self.static_defs.keys() ] ) + '\n'
m += "namespace VULKAN_HPP_NAMESPACE {\n"
m += "%svoid to_json( nlohmann::json &j, const %s &p ) {\n" % ( inline, name )
for v in self.to_table:
if len( v[ 2 ] ):
m += "#if " + ' && '.join( [ x for x in v[ 2 ].keys() ] ) + '\n'
m += " if( %s :: %s == p ) {\n j = \"%s\";\n return;\n }\n" % ( name, v[ 0 ], v[ 1 ] )
if len( v[ 2 ] ):
m += "#endif\n"
m += "}\n"
m += "}\n"
m += "%svoid to_json( nlohmann::json &j, const %s &p ) {\n" % ( inline, cname )
m += " to_json( j, VULKAN_HPP_NAMESPACE :: %s ( p ) );\n" % name
m += "}\n"
m += "namespace VULKAN_HPP_NAMESPACE {\n"
m += "%svoid from_json( const nlohmann::json &j, %s &p ) {\n" % ( inline, name )
m += " if( j.is_string() ) {\n"
for v in self.from_table:
if len( v[ 2 ] ):
m += "#if " + ' && '.join( [ x for x in v[ 2 ].keys() ] ) + '\n'
m += " if( \"%s\" == j.get< std::string >() ) {\n p = %s :: %s ;\n return;\n }\n" % ( v[ 0 ], name, v[ 1 ] )
if len( v[ 2 ] ):
m += "#endif\n"
m += " throw vulkan2json::invalid_enum_value( \"unknown enum name for %s\" );\n" % name
m += " }\n"
m += " if( j.is_number() ) {\n"
m += " p = %s ( j.get< std::int64_t >() );\n" % name
m += " }\n"
m += " throw vulkan2json::invalid_enum_value( \"incompatible value for %s\" );\n" % name
m += "}\n"
m += "}\n"
m += "%svoid from_json( const nlohmann::json &j, %s &p ) {\n" % ( inline, cname )
m += " VULKAN_HPP_NAMESPACE :: %s temp;\n" % name
m += " from_json( j, temp );\n"
m += " p = %s ( temp );\n" % cname
m += "}\n"
if len( self.static_defs ):
m += "#endif\n"
return m
def generate_forward( self ):
name = self.name.get_name()
cname = self.name.get_cname()
m = ""
if len( self.static_defs ):
m += '#if ' + ' && '.join( [ x for x in self.static_defs.keys() ] ) + '\n'
m += "namespace VULKAN_HPP_NAMESPACE {\n"
m += "void to_json( nlohmann::json &j, const %s &p );\n" % name
m += "}\n"
m += "void to_json( nlohmann::json &j, const %s &p );\n" % cname
m += "namespace VULKAN_HPP_NAMESPACE {\n"
m += "void from_json( const nlohmann::json &j, %s &p );\n" % name
m += "}\n"
m += "void from_json( const nlohmann::json &j, %s &p );\n" % cname
if len( self.static_defs ):
m += "#endif\n"
return m
def generate_test( self ):
name = self.name.get_name()
cname = self.name.get_cname()
m = ""
m += "#include <vulkan2json/%s.hpp>\n" % name
m += "BOOST_AUTO_TEST_CASE(%s) {\n" % name
if len( self.static_defs ):
m += '#if ' + ' && '.join( [ x for x in self.static_defs.keys() ] ) + '\n'
for v in self.to_table:
if len( v[ 2 ] ):
m += "#if " + ' && '.join( [ x for x in v[ 2 ].keys() ] ) + '\n'
m += " {\n";
m += " const auto original = VULKAN_HPP_NAMESPACE :: %s :: %s ;\n" % ( name, v[ 0 ] )
m += " const nlohmann::json expected = \"%s\";\n" % v[ 1 ]
m += " const nlohmann::json serialized = original;\n"
m += " const auto deserialized = VULKAN_HPP_NAMESPACE :: %s ( serialized );\n" % name
m += " BOOST_CHECK( deserialized == original );\n"
m += " }\n";
if len( v[ 2 ] ):
m += "#endif\n"
if len( self.static_defs ):
m += "#endif\n"
m += "}\n"
return m
class vulkan_flag:
def __init__( self, name_, ext_, defs_, has_cname_ ):
self.name = vulkan_class_name( name_ )
self.name.remove_flagbits()
self.ext = ext_
self.ext_def = to_extension_name_def( self.ext )
self.static_defs = defs_
self.from_table = []
self.to_table = []
self.has_cname = has_cname_
self.conditional = len( self.static_defs ) != 0
def add( self, name_, cname_, defs_ ):
xor_defs = {}
for d in defs_.keys():
if not d in self.static_defs:
xor_defs[ d ] = defs_[ d ]
e_match = re.match( e_rule, name_ )
if e_match:
self.from_table.append(
( e_match.group( 1 ), name_, xor_defs )
)
self.from_table.append(
( name_, name_, xor_defs )
)
self.from_table.append(
( cname_, name_, xor_defs )
)
self.to_table.append(
( name_, e_match.group( 1 ), xor_defs )
)
if len( xor_defs ):
self.conditional = True
def __str__( self ):
return json.dumps(
{
"ext_suffix" : self.name.ext_suffix,
"version_suffix" : self.name.version_suffix,
"name" : self.name.name,
"ext" : self.ext,
"ext_def" : self.ext_def,
"has_cname" : self.has_cname,
"from_table": [ [ v[ 0 ], v[ 1 ] ] for v in self.from_table ],
"to_table": [ [ v[ 0 ], v[ 1 ] ] for v in self.to_table ]
},
indent=2
)
def generate_impl( self ):
flagbits = self.name.get_flagbits()
flags = self.name.get_flags()
inline = ""
if self.conditional:
inline = "inline "
m = ""
if len( self.static_defs ):
m += "#if " + ' && '.join( [ x for x in v[ 2 ].keys() ] ) + '\n'
m += "namespace VULKAN_HPP_NAMESPACE {\n"
m += "%svoid to_json( nlohmann::json &j, const %s &p ) {\n" % ( inline, flagbits )
for v in self.to_table:
if len( v[ 2 ] ):
m += "#if " + ' && '.join( [ x for x in v[ 2 ].keys() ] ) + '\n'
#!/usr/bin/python
from p4_hlir.main import HLIR
from p4_hlir.hlir.p4_parser import p4_parse_state
import p4_hlir
from p4_hlir.hlir.p4_tables import p4_table
from compiler import HP4Compiler, CodeRepresentation
import argparse
import itertools
import code
from inspect import currentframe, getframeinfo
import sys
import math
from math import ceil
import json
import pkg_resources
SEB = 320
METADATA_WIDTH = 256
PS_RET_TYPE = 0
PS_RET_CRITERIA = 1
PS_RET_BRANCHES = 2
PS_RET_IMM_STATE = 1
PS_CALL_TYPE = 0
PS_CALL_H_INST = 1
OFFSET = 0
WIDTH = 1
BRANCH_VALUES = 0
BRANCH_STATE = 1
VAL_TYPE = 0
VAL_VALUE = 1
MAX_BYTE = 100
T_NAME = 0
L_BOUND = 1
U_BOUND = 2
HIGHEST_PRIORITY = '0'
LOWEST_PRIORITY = '2147483646'
VBITS_WIDTH = 80
MATCH_TYPE = 1
MATCH_FIELD = 0
PRIM_TYPE = 0
PRIM_SUBTYPE = 1
P4_CALL_PRIMITIVE = 0
P4_CALL_PARAMS = 1
PARAM = 0
PARAM_TYPE = 1
MATCH_OBJECT = 0
MATCH_TYPE = 1
EXT_FIRST_WIDTH = 40 # in bytes
EXT_START_INDEX = 2
parse_select_table_boundaries = [0, 20, 30, 40, 50, 60, 70, 80, 90, 100]
primitive_ID = {'modify_field': '[MODIFY_FIELD]',
'add_header': '[ADD_HEADER]',
'copy_header': '[COPY_HEADER]',
'remove_header': '[REMOVE_HEADER]',
'modify_field_with_hash_based_offset': '[MODIFY_FIELD_WITH_HBO]',
'modify_field_rng_uniform': '[MODIFY_FIELD_RNG_U]',
'truncate': '[TRUNCATE]',
'drop': '[DROP]',
'no_op': '[NO_OP]',
'push': '[PUSH]',
'pop': '[POP]',
'count': '[COUNT]',
'execute_meter': '[METER]',
'generate_digest': '[GENERATE_DIGEST]',
'recirculate': '[RECIRCULATE]',
'resubmit': '[RESUBMIT]',
'clone_ingress_pkt_to_egress': '[CLONE_INGRESS_EGRESS]',
'clone_egress_pkt_to_egress': '[CLONE_EGRESS_EGRESS]',
'multicast': '[MULTICAST]',
'add_to_field': '[MATH_ON_FIELD]',
'bit_xor': '[BIT_XOR]'}
primitive_tnames = {'modify_field': 'mod',
'add_header': 'addh',
'copy_header': '',
'remove_header': 'removeh',
'modify_field_with_hash_based_offset': '',
'modify_field_rng_uniform': 'mod_rng',
'truncate' : 'truncate',
'drop' : 'drop',
'no_op' : '',
'push' : '',
'pop' : '',
'count' : '',
'execute_meter': '',
'generate_digest': '',
'recirculate': '',
'resubmit': '',
'clone_ingress_pkt_to_egress': '',
'clone_egress_pkt_to_egress': '',
'multicast': 'multicast',
'add_to_field': 'math_on_field',
'bit_xor': 'bit_xor'}
mf_prim_subtype_ID = {('meta', 'ingress_port'): '1',
('meta', 'packet_length'): '2',
('meta', 'egress_spec'): '3',
('meta', 'egress_port'): '4',
('meta', 'egress_instance'): '5',
('meta', 'instance_type'): '6',
('egress_spec', 'meta'): '7',
('meta', 'const'): '8',
('egress_spec', 'const'): '9',
('ext', 'const'): '10',
('egress_spec', 'ingress_port'): '11',
('ext', 'ext'): '12',
('meta', 'ext'): '13',
('ext', 'meta'): '14'}
mf_prim_subtype_action = {'1': 'mod_meta_stdmeta_ingressport',
'2': 'mod_meta_stdmeta_packetlength',
'3': 'mod_meta_stdmeta_egressspec',
'4': 'mod_meta_stdmeta_egressport',
'5': 'mod_meta_stdmeta_egressinst',
'6': 'mod_meta_stdmeta_insttype',
'7': 'mod_stdmeta_egressspec_meta',
'8': 'mod_meta_const',
'9': 'mod_stdmeta_egressspec_const',
'10': 'mod_extracted_const',
'11': 'mod_stdmeta_egressspec_stdmeta_ingressport',
'12': 'mod_extracted_extracted',
'13': 'mod_meta_extracted',
'14': 'mod_extracted_meta'}
a2f_prim_subtype_ID = {'add': '1', 'sub': '2'}
a2f_prim_subtype_action = {'1': 'a_add2f_extracted_const_u',
'2': 'a_subff_extracted_const_u'}
bx_prim_subtype_ID = {('meta', 'meta', 'const'): '1',
('ext', 'ext', 'const'): '2',
('meta', 'ext', 'const'): '3'}
bx_prim_subtype_action = {'1': 'bit_xor_meta_meta_const',
'2': 'bit_xor_extracted_extracted_const',
'3': 'bit_xor_meta_extracted_const'}
gen_prim_subtype_action = {'add_header': 'a_addh',
'copy_header': '',
'remove_header': 'a_removeh',
'modify_field_with_hash_based_offset': '',
'modify_field_rng_uniform': 'mod_extracted_rng',
'truncate': 'a_truncate',
'drop': 'a_drop',
'no_op': '',
'push': '',
'pop': '',
'count': '',
'execute_meter': '',
'recirculate': '',
'resubmit': '',
'clone_ingress_pkt_to_egress': '',
'clone_egress_pkt_to_egress': '',
'multicast': 'a_multicast'}
current_call = tuple
def debug():
""" Break and enter interactive method after printing location info """
# written before I knew about the pdb module
caller = currentframe().f_back
method_name = caller.f_code.co_name
line_no = getframeinfo(caller).lineno
print(method_name + ": line " + str(line_no))
code.interact(local=dict(globals(), **caller.f_locals))
def unsupported(msg):
print(msg)
exit()
def convert_to_builtin_type(obj):
d = { '__class__':obj.__class__.__name__, '__module__':obj.__module__, }
d.update(obj.__dict__)
return d
class HP4_Command(object):
def __init__(self, command='table_add',
table='',
action='',
match_params=[],
action_params=[]):
self.command = command
self.table = table
self.action = action
self.match_params = match_params
self.action_params = action_params
def __str__(self):
""" assumes command is \'table_add\' """
if self.command != 'table_add':
debug()
raise Exception("Incorrect table command %s, table %s" % (self.command, self.table))
ret = self.table + ' ' + self.action + ' :'
ret += ' '.join(self.match_params)
ret += ':'
ret += ' '.join(self.action_params)
return ret
class HP4_Match_Command(HP4_Command):
def __init__(self, source_table='',
source_action='',
**kwargs):
super(HP4_Match_Command, self).__init__(**kwargs)
self.source_table = source_table
self.source_action = source_action
class HP4_Primitive_Command(HP4_Command):
def __init__(self, source_table, source_action, command, table, action, mparams, aparams, src_aparam_id):
HP4_Command.__init__(self, command, table, action, mparams, aparams)
self.source_table = source_table
self.source_action = source_action
self.src_aparam_id = src_aparam_id
class DAG_Topo_Sorter():
def __init__(self, p4_tables):
self.unmarked = []
self.tempmarked = []
self.permmarked = []
self.L = []
for key in p4_tables:
self.unmarked.append(p4_tables[key])
def visit(self, n):
if n.control_flow_parent == 'egress':
unsupported("ERROR: Not yet supported: tables in egress (" + n.name + ")")
if n in self.tempmarked:
unsupported("ERROR: not a DAG")
if n in self.unmarked:
self.unmarked.remove(n)
self.tempmarked.append(n)
for m in n.next_.values():
if m != None:
self.visit(m)
self.permmarked.append(n)
self.tempmarked.remove(n)
self.L.insert(0, n)
def sort(self):
while len(self.unmarked) > 0: # while there are unmarked nodes do
n = self.unmarked[0]
self.visit(n)
return self.L
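# Illustrative use (hypothetical HLIR object): DAG_Topo_Sorter(h.p4_tables).sort()
# returns the ingress tables ordered so that every table appears before the
# tables reachable through its next_ entries.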
class Table_Rep():
def __init__(self, stage, match_type, source_type, field_name):
self.stage = stage # int
self.match_type = match_type
self.source_type = source_type
self.field_name = field_name
self.name = 't' + str(self.stage) + '_'
if source_type == 'standard_metadata':
self.name += 'stdmeta_' + field_name + '_'
elif source_type == 'metadata':
self.name += 'metadata_'
elif source_type == 'extracted':
self.name += 'extracted_'
if match_type == 'P4_MATCH_EXACT':
self.name += 'exact'
elif match_type == 'P4_MATCH_VALID':
self.name += 'valid'
elif match_type == 'P4_MATCH_TERNARY':
self.name += 'ternary'
elif match_type == 'MATCHLESS':
self.name += 'matchless'
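# Naming example: Table_Rep(2, 'P4_MATCH_EXACT', 'metadata', '') produces the
# name 't2_metadata_exact', while a MATCHLESS table at stage 5 with no source
# produces 't5_matchless'.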
def table_type(self):
if self.source_type == 'standard_metadata':
if self.match_type == 'P4_MATCH_EXACT':
if self.field_name == 'ingress_port':
return '[STDMETA_INGRESS_PORT_EXACT]'
elif self.field_name == 'packet_length':
return '[STDMETA_PACKET_LENGTH_EXACT]'
elif self.field_name == 'instance_type':
return '[STDMETA_INSTANCE_TYPE_EXACT]'
elif self.field_name == 'egress_spec':
return '[STDMETA_EGRESS_SPEC_EXACT]'
else:
unsupported("Not supported: standard_metadata field %s" \
% self.field_name)
else:
unsupported("Not supported: standard_metadata with %s match type" \
% self.match_type)
elif self.source_type == 'metadata':
if self.match_type == 'P4_MATCH_EXACT':
return '[METADATA_EXACT]'
elif self.match_type == 'P4_MATCH_TERNARY':
return '[METADATA_TERNARY]'
else:
unsupported("Not supported: metadata with %s match type" \
% self.match_type)
elif self.source_type == 'extracted':
if self.match_type == 'P4_MATCH_EXACT':
return '[EXTRACTED_EXACT]'
elif self.match_type == 'P4_MATCH_VALID':
return '[EXTRACTED_VALID]'
elif self.match_type == 'P4_MATCH_TERNARY':
return '[EXTRACTED_TERNARY]'
else:
unsupported("Not supported: extracted with %s match type" \
% self.match_type)
elif self.source_type == '':
if self.match_type == 'MATCHLESS':
return '[MATCHLESS]'
else:
unsupported("Not supported: [no source] with %s match type" \
% self.match_type)
else:
unsupported("Not supported: source type %s, match type %s" \
% (self.source_type, self.match_type))
def __str__(self):
return self.name
class Action_Rep():
def __init__(self):
self.stages = set()
self.tables = {} # {stage (int) : table_name (str)}
self.next = {} # {table_name (str) : (next_stage (int), next_table_code (int))}
self.call_sequence = []
class PC_State(object):
newid = itertools.count().next
def __init__(self, hp4_bits_extracted=SEB,
p4_bits_extracted=0,
ps_path=[],
pcs_path=[],
parse_state=None,
entry_table='tset_parse_control',
**kwargs):
self.hp4_bits_extracted = hp4_bits_extracted
self.p4_bits_extracted = p4_bits_extracted
self.ps_path = ps_path
self.pcs_path = pcs_path
self.pcs_id = PC_State.newid()
self.parse_state = parse_state
self.entry_table = entry_table # TODO: Delete if we don't need this
self.children = []
self.header_offsets = {} # header name (str) : hp4 bit offset (int)
for pcs in self.pcs_path:
self.header_offsets.update(pcs.header_offsets)
self.select_criteria = [] # list of (offset, width) tuples, each
# element corresponding to a criteria in the
# select statement, representing the hp4 view
self.select_values = [] # list of lists: each member a list of values,
# each value corresponding to a criteria in
# select_criteria
def __str__(self):
ret = 'ID: ' + str(self.pcs_id) + '; ' + self.parse_state.name + '\n'
ret += 'hp4_bits_extracted: ' + str(self.hp4_bits_extracted) + '\n'
ret += 'p4_bits_extracted: ' + str(self.p4_bits_extracted) + '\n'
ret += 'ps_path: ' + str(self.ps_path) + '\n'
ret += 'pcs_path: '
for pcs in self.pcs_path:
ret += str(pcs.pcs_id) + '(' + pcs.parse_state.name + ') '
ret += '\n'
ret += 'children: '
for child in self.children:
ret += child.parse_state.name + ' '
return ret
def collect_meta(headers):
""" Classify headers (metadata | parsed representation)
- For metadata: assign each field an offset into meta.data
- NOTE: the same cannot be done for parsed representation headers
until we traverse the parse tree, because each path through the
parse tree potentially yields a distinct set of field offsets
into pr.data.
"""
meta_offsets = {}
metadata_offset = 0
for header_key in headers.keys():
header = headers[header_key]
if header.name == 'standard_metadata':
continue
if header.name == 'intrinsic_metadata':
continue
if header.metadata == True:
for field in header.fields:
fullname = header.name + '.' + field.name
meta_offsets[fullname] = metadata_offset
metadata_offset += field.width
if metadata_offset > METADATA_WIDTH:
unsupported("Error: out of metadata memory with %s" % fullname)
return meta_offsets
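# Illustrative sketch of collect_meta's output (hypothetical header layout, not from
# any real program): for a metadata header "meta" with fields a (16 bits) and
# b (32 bits), followed by a metadata header "stats" with field count (8 bits),
# the returned map would be
#   {'meta.a': 0, 'meta.b': 16, 'stats.count': 48}
# i.e. each metadata field gets a running bit offset into meta.data, and the
# function exits via unsupported() once the running offset exceeds METADATA_WIDTH.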
def collect_actions(actions):
""" Uniquely number each action """
action_ID = {}
actionID = 1
for action in actions:
if action.lineno > 0: # is action from source (else built-in)?
action_ID[action] = actionID
actionID += 1
return action_ID
def get_prim_subtype(p4_call):
""" p4_call: (p4_action, [list of parameters])
"""
primitive = p4_call[P4_CALL_PRIMITIVE]
params = p4_call[P4_CALL_PARAMS]
if (primitive.name == 'drop' or
primitive.name == 'add_header' or
primitive.name == 'remove_header' or
primitive.name == 'modify_field_rng_uniform'):
return '0'
elif primitive.name == 'add_to_field':
if type(params[0]) is p4_hlir.hlir.p4_headers.p4_field:
if params[0].instance.metadata == True:
unsupported("Not supported: metadata (%s) as dst field in \
add_to_field" % params[0].instance.name)
else:
if type(params[1]) is int:
if params[1] < 0:
return(a2f_prim_subtype_ID['sub'])
else:
return(a2f_prim_subtype_ID['add'])
else:
unsupported("ERROR: Not supported: %s type for src field in \
add_to_field" % type(params[1]))
else:
unsupported("ERROR: dst field type %s in add_to_field" % | |
# Check whether the receiving address is
# behaving as a mailing list
if shared.safeConfigGetBoolean(toAddress, 'mailinglist') and messageEncodingType != 0:
try:
mailingListName = shared.config.get(
toAddress, 'mailinglistname')
except:
mailingListName = ''
# Let us send out this message as a broadcast
subject = self.addMailingListNameToSubject(
subject, mailingListName)
# Let us now send this message out as a broadcast
message = time.strftime("%a, %Y-%m-%d %H:%M:%S UTC", time.gmtime(
)) + ' Message ostensibly from ' + fromAddress + ':\n\n' + body
fromAddress = toAddress # The fromAddress for the broadcast that we are about to send is the toAddress (my address) for the msg message we are currently processing.
ackdataForBroadcast = OpenSSL.rand(
32) # We don't actually need the ackdataForBroadcast for acknowledgement since this is a broadcast message but we can use it to update the user interface when the POW is done generating.
toAddress = '[Broadcast subscribers]'
ripe = ''
# We really should have a discussion about how to
# set the TTL for mailing list broadcasts. This is obviously
# hard-coded.
TTL = 2*7*24*60*60 # 2 weeks
t = ('',
toAddress,
ripe,
fromAddress,
subject,
message,
ackdataForBroadcast,
int(time.time()), # sentTime (this doesn't change)
int(time.time()), # lastActionTime
0,
'broadcastqueued',
0,
'sent',
2,
TTL)
helper_sent.insert(t)
shared.UISignalQueue.put(('displayNewSentMessage', (
toAddress, '[Broadcast subscribers]', fromAddress, subject, message, ackdataForBroadcast)))
shared.workerQueue.put(('sendbroadcast', ''))
# Don't send ACK if invalid, blacklisted senders, invisible messages, disabled or chan
if self.ackDataHasAValidHeader(ackData) and \
not blockMessage and \
messageEncodingType != 0 and \
not shared.safeConfigGetBoolean(toAddress, 'dontsendack') and \
not shared.safeConfigGetBoolean(toAddress, 'chan'):
shared.checkAndShareObjectWithPeers(ackData[24:])
# Display timing data
timeRequiredToAttemptToDecryptMessage = time.time(
) - messageProcessingStartTime
shared.successfullyDecryptMessageTimings.append(
timeRequiredToAttemptToDecryptMessage)
timing_sum = sum(shared.successfullyDecryptMessageTimings)
logger.debug('Time to decrypt this message successfully: %s\n\
Average time for all message decryption successes since startup: %s.' %
(timeRequiredToAttemptToDecryptMessage, timing_sum / len(shared.successfullyDecryptMessageTimings))
)
def processbroadcast(self, data):
messageProcessingStartTime = time.time()
shared.numberOfBroadcastsProcessed += 1
shared.UISignalQueue.put((
'updateNumberOfBroadcastsProcessed', 'no data'))
inventoryHash = calculateInventoryHash(data)
readPosition = 20 # bypass the nonce, time, and object type
broadcastVersion, broadcastVersionLength = decodeVarint(
data[readPosition:readPosition + 9])
readPosition += broadcastVersionLength
if broadcastVersion < 4 or broadcastVersion > 5:
logger.info('Cannot decode incoming broadcast versions less than 4 or higher than 5. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
return
cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(
data[readPosition:readPosition + 10])
readPosition += cleartextStreamNumberLength
if broadcastVersion == 4:
"""
v4 broadcasts are encrypted the same way the msgs are encrypted. To see if we are interested in a
v4 broadcast, we try to decrypt it. This was replaced with v5 broadcasts which include a tag which
we check instead, just like we do with v4 pubkeys.
"""
signedData = data[8:readPosition]
initialDecryptionSuccessful = False
for key, cryptorObject in shared.MyECSubscriptionCryptorObjects.items():
try:
if initialDecryptionSuccessful: # continue decryption attempts to avoid timing attacks
cryptorObject.decrypt(data[readPosition:])
else:
decryptedData = cryptorObject.decrypt(data[readPosition:])
toRipe = key # This is the RIPE hash of the sender's pubkey. We need this below to compare to the RIPE hash of the sender's address to verify that it was encrypted with their key rather than some other key.
initialDecryptionSuccessful = True
logger.info('EC decryption successful using key associated with ripe hash: %s' % hexlify(key))
except Exception as err:
pass
# print 'cryptorObject.decrypt Exception:', err
if not initialDecryptionSuccessful:
# This is not a broadcast I am interested in.
logger.debug('Length of time program spent failing to decrypt this v4 broadcast: %s seconds.' % (time.time() - messageProcessingStartTime,))
return
elif broadcastVersion == 5:
embeddedTag = data[readPosition:readPosition+32]
readPosition += 32
if embeddedTag not in shared.MyECSubscriptionCryptorObjects:
logger.debug('We\'re not interested in this broadcast.')
return
# We are interested in this broadcast because of its tag.
signedData = data[8:readPosition] # We're going to add some more data which is signed further down.
cryptorObject = shared.MyECSubscriptionCryptorObjects[embeddedTag]
try:
decryptedData = cryptorObject.decrypt(data[readPosition:])
logger.debug('EC decryption successful')
except Exception as err:
logger.debug('Broadcast version %s decryption Unsuccessful.' % broadcastVersion)
return
# At this point this is a broadcast I have decrypted and am
# interested in.
readPosition = 0
sendersAddressVersion, sendersAddressVersionLength = decodeVarint(
decryptedData[readPosition:readPosition + 9])
if broadcastVersion == 4:
if sendersAddressVersion < 2 or sendersAddressVersion > 3:
logger.warning('Cannot decode senderAddressVersion other than 2 or 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
return
elif broadcastVersion == 5:
if sendersAddressVersion < 4:
logger.info('Cannot decode senderAddressVersion less than 4 for broadcast version number 5. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.')
return
readPosition += sendersAddressVersionLength
sendersStream, sendersStreamLength = decodeVarint(
decryptedData[readPosition:readPosition + 9])
if sendersStream != cleartextStreamNumber:
logger.info('The stream number outside of the encryption on which the POW was completed doesn\'t match the stream number inside the encryption. Ignoring broadcast.')
return
readPosition += sendersStreamLength
behaviorBitfield = decryptedData[readPosition:readPosition + 4]
readPosition += 4
sendersPubSigningKey = '\x04' + \
decryptedData[readPosition:readPosition + 64]
readPosition += 64
sendersPubEncryptionKey = '\x04' + \
decryptedData[readPosition:readPosition + 64]
readPosition += 64
if sendersAddressVersion >= 3:
requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = decodeVarint(
decryptedData[readPosition:readPosition + 10])
readPosition += varintLength
logger.debug('sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is %s' % requiredAverageProofOfWorkNonceTrialsPerByte)
requiredPayloadLengthExtraBytes, varintLength = decodeVarint(
decryptedData[readPosition:readPosition + 10])
readPosition += varintLength
logger.debug('sender\'s requiredPayloadLengthExtraBytes is %s' % requiredPayloadLengthExtraBytes)
endOfPubkeyPosition = readPosition
sha = hashlib.new('sha512')
sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
ripeHasher = hashlib.new('ripemd160')
ripeHasher.update(sha.digest())
calculatedRipe = ripeHasher.digest()
if broadcastVersion == 4:
if toRipe != calculatedRipe:
logger.info('The encryption key used to encrypt this message doesn\'t match the keys embedded in the message itself. Ignoring message.')
return
elif broadcastVersion == 5:
calculatedTag = hashlib.sha512(hashlib.sha512(encodeVarint(
sendersAddressVersion) + encodeVarint(sendersStream) + calculatedRipe).digest()).digest()[32:]
if calculatedTag != embeddedTag:
logger.debug('The tag and encryption key used to encrypt this message don\'t match the keys embedded in the message itself. Ignoring message.')
return
messageEncodingType, messageEncodingTypeLength = decodeVarint(
decryptedData[readPosition:readPosition + 9])
if messageEncodingType == 0:
return
readPosition += messageEncodingTypeLength
messageLength, messageLengthLength = decodeVarint(
decryptedData[readPosition:readPosition + 9])
readPosition += messageLengthLength
message = decryptedData[readPosition:readPosition + messageLength]
readPosition += messageLength
readPositionAtBottomOfMessage = readPosition
signatureLength, signatureLengthLength = decodeVarint(
decryptedData[readPosition:readPosition + 9])
readPosition += signatureLengthLength
signature = decryptedData[
readPosition:readPosition + signatureLength]
signedData += decryptedData[:readPositionAtBottomOfMessage]
if not highlevelcrypto.verify(signedData, signature, hexlify(sendersPubSigningKey)):
logger.debug('ECDSA verify failed')
return
logger.debug('ECDSA verify passed')
sigHash = hashlib.sha512(hashlib.sha512(signature).digest()).digest()[32:] # Used to detect and ignore duplicate messages in our inbox
fromAddress = encodeAddress(
sendersAddressVersion, sendersStream, calculatedRipe)
logger.info('fromAddress: %s' % fromAddress)
# Let's store the public key in case we want to reply to this person.
sqlExecute('''INSERT INTO pubkeys VALUES (?,?,?,?,?)''',
fromAddress,
sendersAddressVersion,
decryptedData[:endOfPubkeyPosition],
int(time.time()),
'yes')
# Check to see whether we happen to be awaiting this
# pubkey in order to send a message. If we are, it will do the POW
# and send it.
self.possibleNewPubkey(fromAddress)
fromAddress = encodeAddress(
sendersAddressVersion, sendersStream, calculatedRipe)
logger.debug('fromAddress: ' + fromAddress)
if messageEncodingType == 2:
subject, body = self.decodeType2Message(message)
logger.info('Broadcast subject (first 100 characters): %s' % repr(subject)[:100])
elif messageEncodingType == 1:
body = message
subject = ''
elif messageEncodingType == 0:
logger.info('messageEncodingType == 0. Doing nothing with the message.')
return
else:
body = 'Unknown encoding type.\n\n' + repr(message)
subject = ''
toAddress = '[Broadcast subscribers]'
if helper_inbox.isMessageAlreadyInInbox(sigHash):
logger.info('This broadcast is already in our inbox. Ignoring it.')
return
t = (inventoryHash, toAddress, fromAddress, subject, int(
time.time()), body, 'inbox', messageEncodingType, 0, sigHash)
helper_inbox.insert(t)
shared.UISignalQueue.put(('displayNewInboxMessage', (
inventoryHash, toAddress, fromAddress, subject, body)))
# If we are behaving as an API then we might need to run an
# outside command to let some program know that a new message
# has arrived.
if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
try:
apiNotifyPath = shared.config.get(
'bitmessagesettings', 'apinotifypath')
except:
apiNotifyPath = ''
if apiNotifyPath != '':
call([apiNotifyPath, "newBroadcast"])
# Display timing data
logger.info('Time spent processing this interesting broadcast: %s' % (time.time() - messageProcessingStartTime,))
def possibleNewPubkey(self, address):
"""
We have inserted a pubkey into our pubkey table which we received from a
pubkey, msg, or broadcast message. It might be one that we have been
waiting for. Let's check.
"""
# For address versions <=
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Specpr(KaitaiStruct):
"""Specpr records are fixed format, 1536 bytes/record. Record number
counting starts at 0. Binary data are in IEEE format real numbers
and non-byte-swapped integers (compatible with all Sun
Microsystems and Hewlett Packard workstations; Intel and some DEC
machines are byte-swapped relative to Suns and HPs). Each record may
contain different information according to the following scheme.
You can get some library of spectra from
ftp://ftpext.cr.usgs.gov/pub/cr/co/denver/speclab/pub/spectral.library/splib06.library/
"""
class RecordType(Enum):
data_initial = 0
text_initial = 1
data_continuation = 2
text_continuation = 3
SEQ_FIELDS = ["records"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['records']['start'] = self._io.pos()
self.records = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['records']:
self._debug['records']['arr'] = []
self._debug['records']['arr'].append({'start': self._io.pos()})
_t_records = self._root.Record(self._io, self, self._root)
_t_records._read()
self.records.append(_t_records)
self._debug['records']['arr'][len(self.records) - 1]['end'] = self._io.pos()
i += 1
self._debug['records']['end'] = self._io.pos()
class DataInitial(KaitaiStruct):
SEQ_FIELDS = ["ids", "iscta", "isctb", "jdatea", "jdateb", "istb", "isra", "isdec", "itchan", "irmas", "revs", "iband", "irwav", "irespt", "irecno", "itpntr", "ihist", "mhist", "nruns", "siangl", "seangl", "sphase", "iwtrns", "itimch", "xnrm", "scatim", "timint", "tempd", "data"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['ids']['start'] = self._io.pos()
self.ids = self._root.Identifiers(self._io, self, self._root)
self.ids._read()
self._debug['ids']['end'] = self._io.pos()
self._debug['iscta']['start'] = self._io.pos()
self.iscta = self._root.CoarseTimestamp(self._io, self, self._root)
self.iscta._read()
self._debug['iscta']['end'] = self._io.pos()
self._debug['isctb']['start'] = self._io.pos()
self.isctb = self._root.CoarseTimestamp(self._io, self, self._root)
self.isctb._read()
self._debug['isctb']['end'] = self._io.pos()
self._debug['jdatea']['start'] = self._io.pos()
self.jdatea = self._io.read_s4be()
self._debug['jdatea']['end'] = self._io.pos()
self._debug['jdateb']['start'] = self._io.pos()
self.jdateb = self._io.read_s4be()
self._debug['jdateb']['end'] = self._io.pos()
self._debug['istb']['start'] = self._io.pos()
self.istb = self._root.CoarseTimestamp(self._io, self, self._root)
self.istb._read()
self._debug['istb']['end'] = self._io.pos()
self._debug['isra']['start'] = self._io.pos()
self.isra = self._io.read_s4be()
self._debug['isra']['end'] = self._io.pos()
self._debug['isdec']['start'] = self._io.pos()
self.isdec = self._io.read_s4be()
self._debug['isdec']['end'] = self._io.pos()
self._debug['itchan']['start'] = self._io.pos()
self.itchan = self._io.read_s4be()
self._debug['itchan']['end'] = self._io.pos()
self._debug['irmas']['start'] = self._io.pos()
self.irmas = self._io.read_s4be()
self._debug['irmas']['end'] = self._io.pos()
self._debug['revs']['start'] = self._io.pos()
self.revs = self._io.read_s4be()
self._debug['revs']['end'] = self._io.pos()
self._debug['iband']['start'] = self._io.pos()
self.iband = [None] * (2)
for i in range(2):
if not 'arr' in self._debug['iband']:
self._debug['iband']['arr'] = []
self._debug['iband']['arr'].append({'start': self._io.pos()})
self.iband[i] = self._io.read_s4be()
self._debug['iband']['arr'][i]['end'] = self._io.pos()
self._debug['iband']['end'] = self._io.pos()
self._debug['irwav']['start'] = self._io.pos()
self.irwav = self._io.read_s4be()
self._debug['irwav']['end'] = self._io.pos()
self._debug['irespt']['start'] = self._io.pos()
self.irespt = self._io.read_s4be()
self._debug['irespt']['end'] = self._io.pos()
self._debug['irecno']['start'] = self._io.pos()
self.irecno = self._io.read_s4be()
self._debug['irecno']['end'] = self._io.pos()
self._debug['itpntr']['start'] = self._io.pos()
self.itpntr = self._io.read_s4be()
self._debug['itpntr']['end'] = self._io.pos()
self._debug['ihist']['start'] = self._io.pos()
self.ihist = (KaitaiStream.bytes_strip_right(self._io.read_bytes(60), 32)).decode(u"ascii")
self._debug['ihist']['end'] = self._io.pos()
self._debug['mhist']['start'] = self._io.pos()
self.mhist = [None] * (4)
for i in range(4):
if not 'arr' in self._debug['mhist']:
self._debug['mhist']['arr'] = []
self._debug['mhist']['arr'].append({'start': self._io.pos()})
self.mhist[i] = (self._io.read_bytes(74)).decode(u"ascii")
self._debug['mhist']['arr'][i]['end'] = self._io.pos()
self._debug['mhist']['end'] = self._io.pos()
self._debug['nruns']['start'] = self._io.pos()
self.nruns = self._io.read_s4be()
self._debug['nruns']['end'] = self._io.pos()
self._debug['siangl']['start'] = self._io.pos()
self.siangl = self._root.IllumAngle(self._io, self, self._root)
self.siangl._read()
self._debug['siangl']['end'] = self._io.pos()
self._debug['seangl']['start'] = self._io.pos()
self.seangl = self._root.IllumAngle(self._io, self, self._root)
self.seangl._read()
self._debug['seangl']['end'] = self._io.pos()
self._debug['sphase']['start'] = self._io.pos()
self.sphase = self._io.read_s4be()
self._debug['sphase']['end'] = self._io.pos()
self._debug['iwtrns']['start'] = self._io.pos()
self.iwtrns = self._io.read_s4be()
self._debug['iwtrns']['end'] = self._io.pos()
self._debug['itimch']['start'] = self._io.pos()
self.itimch = self._io.read_s4be()
self._debug['itimch']['end'] = self._io.pos()
self._debug['xnrm']['start'] = self._io.pos()
self.xnrm = self._io.read_f4be()
self._debug['xnrm']['end'] = self._io.pos()
self._debug['scatim']['start'] = self._io.pos()
self.scatim = self._io.read_f4be()
self._debug['scatim']['end'] = self._io.pos()
self._debug['timint']['start'] = self._io.pos()
self.timint = self._io.read_f4be()
self._debug['timint']['end'] = self._io.pos()
self._debug['tempd']['start'] = self._io.pos()
self.tempd = self._io.read_f4be()
self._debug['tempd']['end'] = self._io.pos()
self._debug['data']['start'] = self._io.pos()
self.data = [None] * (256)
for i in range(256):
if not 'arr' in self._debug['data']:
self._debug['data']['arr'] = []
self._debug['data']['arr'].append({'start': self._io.pos()})
self.data[i] = self._io.read_f4be()
self._debug['data']['arr'][i]['end'] = self._io.pos()
self._debug['data']['end'] = self._io.pos()
@property
def phase_angle_arcsec(self):
"""The phase angle between iangl and eangl in seconds."""
if hasattr(self, '_m_phase_angle_arcsec'):
return self._m_phase_angle_arcsec if hasattr(self, '_m_phase_angle_arcsec') else None
self._m_phase_angle_arcsec = (self.sphase / 1500)
return self._m_phase_angle_arcsec if hasattr(self, '_m_phase_angle_arcsec') else None
class CoarseTimestamp(KaitaiStruct):
SEQ_FIELDS = ["scaled_seconds"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['scaled_seconds']['start'] = self._io.pos()
self.scaled_seconds = self._io.read_s4be()
self._debug['scaled_seconds']['end'] = self._io.pos()
@property
def seconds(self):
if hasattr(self, '_m_seconds'):
return self._m_seconds if hasattr(self, '_m_seconds') else None
self._m_seconds = (self.scaled_seconds * 24000)
return self._m_seconds if hasattr(self, '_m_seconds') else None
class Icflag(KaitaiStruct):
"""it is big endian."""
SEQ_FIELDS = ["reserved", "isctb_type", "iscta_type", "coordinate_mode", "errors", "text", "continuation"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['reserved']['start'] = self._io.pos()
self.reserved = self._io.read_bits_int(26)
self._debug['reserved']['end'] = self._io.pos()
self._debug['isctb_type']['start'] = self._io.pos()
self.isctb_type = self._io.read_bits_int(1) != 0
self._debug['isctb_type']['end'] = self._io.pos()
self._debug['iscta_type']['start'] = self._io.pos()
self.iscta_type = self._io.read_bits_int(1) != 0
self._debug['iscta_type']['end'] = self._io.pos()
self._debug['coordinate_mode']['start'] = self._io.pos()
self.coordinate_mode = self._io.read_bits_int(1) != 0
self._debug['coordinate_mode']['end'] = self._io.pos()
self._debug['errors']['start'] = self._io.pos()
self.errors = self._io.read_bits_int(1) != 0
self._debug['errors']['end'] = self._io.pos()
self._debug['text']['start'] = self._io.pos()
self.text = self._io.read_bits_int(1) != 0
self._debug['text']['end'] = self._io.pos()
self._debug['continuation']['start'] = self._io.pos()
self.continuation = self._io.read_bits_int(1) != 0
self._debug['continuation']['end'] = self._io.pos()
@property
def type(self):
if hasattr(self, '_m_type'):
return self._m_type if hasattr(self, '_m_type') else None
self._m_type = KaitaiStream.resolve_enum(self._root.RecordType, ((int(self.text) * 1) + (int(self.continuation) * 2)))
return self._m_type if hasattr(self, '_m_type') else None
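# The two low flag bits map onto RecordType as type = text*1 + continuation*2:
# text=0/continuation=0 -> data_initial, text=1/continuation=0 -> text_initial,
# text=0/continuation=1 -> data_continuation, text=1/continuation=1 -> text_continuation.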
class DataContinuation(KaitaiStruct):
SEQ_FIELDS = ["cdata"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['cdata']['start'] = self._io.pos()
self.cdata = [None] * (383)
for i in range(383):
if not 'arr' in self._debug['cdata']:
self._debug['cdata']['arr'] = []
self._debug['cdata']['arr'].append({'start': self._io.pos()})
self.cdata[i] = self._io.read_f4be()
self._debug['cdata']['arr'][i]['end'] = self._io.pos()
self._debug['cdata']['end'] = self._io.pos()
class Identifiers(KaitaiStruct):
SEQ_FIELDS = ["ititle", "usernm"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['ititle']['start'] = self._io.pos()
self.ititle = (KaitaiStream.bytes_strip_right(self._io.read_bytes(40), 32)).decode(u"ascii")
self._debug['ititle']['end'] = self._io.pos()
self._debug['usernm']['start'] = self._io.pos()
self.usernm = (self._io.read_bytes(8)).decode(u"ascii")
self._debug['usernm']['end'] = self._io.pos()
class IllumAngle(KaitaiStruct):
SEQ_FIELDS = ["angl"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['angl']['start'] = self._io.pos()
self.angl = self._io.read_s4be()
self._debug['angl']['end'] = self._io.pos()
@property
def seconds_total(self):
if hasattr(self, '_m_seconds_total'):
return self._m_seconds_total if hasattr(self, '_m_seconds_total') else None
self._m_seconds_total = self.angl // 6000
return self._m_seconds_total if hasattr(self, '_m_seconds_total') else None
@property
def minutes_total(self):
if hasattr(self, '_m_minutes_total'):
return self._m_minutes_total if hasattr(self, '_m_minutes_total') else None
self._m_minutes_total = self.seconds_total // 60
return self._m_minutes_total if hasattr(self, '_m_minutes_total') else None
@property
def degrees_total(self):
if hasattr(self, '_m_degrees_total'):
return self._m_degrees_total if hasattr(self, '_m_degrees_total') else None
self._m_degrees_total = self.minutes_total // 60
return self._m_degrees_total if hasattr(self, '_m_degrees_total') else None
class TextInitial(KaitaiStruct):
SEQ_FIELDS = ["ids", "itxtpt", "itxtch", "itext"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['ids']['start'] = self._io.pos()
self.ids = self._root.Identifiers(self._io, self, self._root)
self.ids._read()
self._debug['ids']['end'] = self._io.pos()
self._debug['itxtpt']['start'] = self._io.pos()
self.itxtpt = self._io.read_u4be()
self._debug['itxtpt']['end'] = self._io.pos()
self._debug['itxtch']['start'] = self._io.pos()
self.itxtch = self._io.read_s4be()
self._debug['itxtch']['end'] = self._io.pos()
self._debug['itext']['start'] = self._io.pos()
self.itext = (self._io.read_bytes(1476)).decode(u"ascii")
self._debug['itext']['end'] = self._io.pos()
class Record(KaitaiStruct):
SEQ_FIELDS = ["icflag", "content"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['icflag']['start'] = self._io.pos()
self.icflag = self._root.Icflag(self._io, self, self._root)
self.icflag._read()
self._debug['icflag']['end'] = self._io.pos()
self._debug['content']['start'] = self._io.pos()
_on = self.icflag.type
if _on == self._root.RecordType.data_initial:
self._raw_content = self._io.read_bytes((1536 - 4))
_io__raw_content = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.DataInitial(_io__raw_content, self, self._root)
self.content._read()
elif _on == self._root.RecordType.data_continuation:
self._raw_content = self._io.read_bytes((1536 - 4))
_io__raw_content = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.DataContinuation(_io__raw_content, self, self._root)
self.content._read()
elif _on == self._root.RecordType.text_continuation:
self._raw_content = self._io.read_bytes((1536 - 4))
_io__raw_content = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.TextContinuation(_io__raw_content, self, self._root)
self.content._read()
elif _on == self._root.RecordType.text_initial:
self._raw_content = self._io.read_bytes((1536 - 4))
_io__raw_content = KaitaiStream(BytesIO(self._raw_content))
self.content = self._root.TextInitial(_io__raw_content, self, self._root)
self.content._read()
else:
self.content = self._io.read_bytes((1536 - 4))
self._debug['content']['end'] = self._io.pos()
class TextContinuation(KaitaiStruct):
SEQ_FIELDS = ["tdata"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
def get_row(self, idx: int) -> List[float]:
""" TODO: Method docstring
"""
return self._A[idx]
def get_col(self, idx: int) -> List[float]:
""" TODO: Method docstring
"""
return [row[idx] for row in self._A]
def transpose(self) -> 'Matrix':
""" Returns the transpose of the calling matrix.
"""
M = Matrix.zeros(self.num_cols, self.num_rows)
for i in range(self.num_rows):
for j in range(self.num_cols):
M[j,i] = self[i,j]
return M
def determinant(self) -> float:
""" Returns the determinant of the calling matrix.
"""
m, n = self.size
if m != n:
raise ValueError("The calling matrix is not square and the determinant does not exist.")
if m == 1:
d = self[0, 0]
elif m == 2:
d = self[0,0] * self[1,1] - self[0,1] * self[1,0]
else:
d = 0.0
for j in range(self.num_cols):
A_temp = copy(self[:, :])
A_temp[0, :] = Matrix.empty()
A_temp[:, j] = Matrix.empty()
d += (self[0, j] * pow(-1, j) * A_temp.determinant())
return d
def inverse(self) -> 'Matrix':
""" Returns the inverse of the calling matrix, computed using the cofactor method.
"""
def compute_cofactor_matrix(A: 'Matrix') -> 'Matrix':
""" Returns the cofactor matrix computed from the input matrix.
"""
m, n = A.size
if m != n:
raise ValueError("The input matrix is not square. The cofactor matrix does not "
"exist.")
M = Matrix.zeros(*A.size)
for i in range(A.num_rows):
for j in range(A.num_cols):
A_temp = A[:, :]
A_temp[i, :] = Matrix.empty()
A_temp[:, j] = Matrix.empty()
M[i, j] = pow(-1, i + j) * A_temp.determinant()
return M
m,n = self.size
if m != n:
raise ValueError("The calling matrix is not square. The matrix inverse does not exist.")
d = self.determinant()
if not d:
raise ValueError("The calling matrix is singular. The matrix inverse does not exist.")
return (1 / d) * compute_cofactor_matrix(self).transpose()
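# Hedged usage sketch (assumes Matrix can be built from a list of rows, as the
# Vector3 subclass below does via super().__init__, and that Matrix defines *
# for matrix products):
#
#   A = Matrix([[4.0, 7.0], [2.0, 6.0]])
#   A_inv = A.inverse()      # [[0.6, -0.7], [-0.2, 0.4]] via the cofactor method
#   I_approx = A * A_inv     # should be close to Matrix.identity(2)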
def is_row_matrix(self) -> bool:
""" Returns True if the calling Matrix is a row matrix (i.e. has one row and one or more
columns), False otherwise.
Returns:
bool: Boolean indicator of whether or not the calling matrix is a row matrix.
"""
return self.num_rows == 1
def is_column_matrix(self) -> bool:
""" Returns True if the calling Matrix is a column matrix (i.e. has one column and one or
more rows), False otherwise.
Returns:
bool: Boolean indicator of whether or not the calling matrix is a column matrix.
"""
return self.num_cols == 1
def is_square(self) -> bool:
""" Returns True if the calling Matrix is square (i.e. the number of rows equals the
number of columns), False otherwise.
Returns:
bool: Boolean indicator of whether or not the calling matrix is square.
"""
return self.num_rows == self.num_cols
def to_column_matrix(self) -> 'Matrix':
""" Returns a copy of the calling Matrix expressed as a column matrix, with each row
stacked in sequence.
Returns:
Matrix: A copy of the calling matrix, in column matrix form.
"""
return Matrix.from_column_matrices([row.transpose() for row in self])
class Vector3(Matrix):
""" Class represents a Euclidean vector.
"""
#pylint: disable=arguments-differ
@staticmethod
def zeros() -> 'Vector3':
""" TODO: Method docstring
"""
return Vector3(0, 0, 0)
#pylint: disable=arguments-differ
@staticmethod
def ones() -> 'Vector3':
""" TODO: Method docstring
"""
return Vector3(1,1,1)
@staticmethod
def identity(dim: int) -> 'Vector3':
raise NotImplementedError
@staticmethod
def fill(num_rows: int, num_cols: int, fill_value: float) -> 'Vector3':
raise NotImplementedError
@staticmethod
def empty() -> 'Vector3':
raise NotImplementedError
@staticmethod
def from_matrix(M: Matrix) -> 'Vector3':
""" Factory method to construct a Vector3 from a Matrix. The input Matrix must be of size 3x1
or 1x3 for this operation to be successful.
Args:
M (Matrix): The Matrix from which to construct the Vector3.
Returns:
Vector3: The instantiated Vector3 object.
Raises:
ValueError: Raised if the input Matrix is not of size 3x1 or 1x3.
"""
if M.size not in {(3, 1), (1, 3)}:
raise ValueError("Input matrix must be a row or column matrix of length three.")
return Vector3(*(M.get_col(0) if M.size == (3, 1) else M.get_row(0)))
def __init__(self, x: float = 0.0, y: float = 0.0, z: float = 0.0):
super().__init__([[x], [y], [z]])
@property
def x(self) -> float:
""" TODO: Property docstring
"""
return self[0, 0]
@x.setter
def x(self, value: float):
""" TODO: Property docstring
"""
self[0,0] = value
@property
def y(self) -> float:
""" TODO: Property docstring
"""
return self[1,0]
@y.setter
def y(self, value: float):
""" TODO: Property docstring
"""
self[1,0] = value
@property
def z(self) -> float:
""" TODO: Property docstring
"""
return self[2,0]
@z.setter
def z(self, value: float):
""" TODO: Property docstring
"""
self[2,0] = value
def __str__(self) -> str:
return f'[x = {self.x}, y = {self.y}, z = {self.z}]'
def __repr__(self) -> str:
return f'[{self.x}, {self.y}, {self.z}]'
def __add__(self, other: Union[Matrix, 'Vector3']) -> 'Vector3':
return Vector3.from_matrix(super().__add__(other))
def __sub__(self, other: Union[Matrix, 'Vector3']) -> 'Vector3':
return Vector3.from_matrix(super().__sub__(other))
def __rsub__(self, other: Union[Matrix, 'Vector3']) -> 'Vector3':
return Vector3.from_matrix(super().__rsub__(other))
def __abs__(self) -> 'Vector3':
return Vector3.from_matrix(super().__abs__())
def __neg__(self) -> 'Vector3':
return Vector3.from_matrix(super().__neg__())
def norm(self) -> float:
""" Returns the Euclidean norm of the calling vector.
"""
return sqrt(self.x**2 + self.y**2 + self.z**2)
def norm_2(self) -> float:
""" Returns the square of the Euclidean norm of the calling vector.
"""
return self.x**2 + self.y**2 + self.z**2
def cross(self, other: 'Vector3') -> 'Vector3':
""" Returns the cross product of the calling vector with the argument
vector, computed as C = A x B for C = A.cross(B).
"""
if not isinstance(other, Vector3):
return NotImplemented
x = self.y * other.z - self.z * other.y
y = self.z * other.x - self.x * other.z
z = self.x * other.y - self.y * other.x
return Vector3(x, y, z)
def dot(self, other: 'Vector3') -> float:
""" Returns the dot product of the calling vector with the argument
vector, computed as C = A * B for C = A.dot(B).
"""
if not isinstance(other, Vector3):
return NotImplemented
return self.x * other.x + self.y * other.y + self.z * other.z
def vertex_angle(self, other: 'Vector3') -> float:
""" Returns the angle between the calling vector and the
argument vector, measured from the calling vector. If
either vector is a zero vector an angle of 0.0 radians
will be returned.
Args:
other (Vector3): Vector to which to compute the
vertex angle.
Returns:
float: The angle between the two vectors, expressed
in radians.
"""
if not isinstance(other, Vector3):
return NotImplemented
m = self.norm() * other.norm()
#TODO Replace hard-zero check below with machine precision-tolerant division
return acos(self.dot(other) / m) if m else 0.0
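# Quick commented example: perpendicular unit vectors give pi/2, and either
# operand being a zero vector short-circuits to 0.0 rather than dividing by zero.
#   Vector3(1, 0, 0).vertex_angle(Vector3(0, 1, 0))   # ~1.5707963 (pi/2)
#   Vector3.zeros().vertex_angle(Vector3(0, 1, 0))    # 0.0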
def normalize(self) -> None:
""" Normalizes the calling vector in place by its Euclidean norm. """
m = self.norm()
self[0, 0] /= m
self[1, 0] /= m
self[2, 0] /= m
def normalized(self) -> 'Vector3':
""" Returns the calling vector, normalized by its Euclidean norm. """
m = self.norm()
#TODO Replace hard-zero check below with machine precision-tolerant division
return Vector3(self.x / m, self.y / m, self.z / m) if m else Vector3.zeros()
class TimeSpan:
""" Class represents a time structure supporting nanosecond precision.
"""
@staticmethod
def undefined() -> 'TimeSpan':
""" Factory method to create an undefined TimeSpan.
"""
return TimeSpan(None, None)
@staticmethod
def zero() -> 'TimeSpan':
""" Factory method to create a zero TimeSpan.
"""
return TimeSpan(0, 0)
@staticmethod
def from_seconds(seconds: float) -> 'TimeSpan':
""" Factory method to create a TimeSpan from a number of seconds.
"""
return TimeSpan(*_decompose_decimal_seconds(seconds))
@staticmethod
def from_minutes(minutes: float) -> 'TimeSpan':
""" Factory method to create a TimeSpan from a number of minutes.
"""
return TimeSpan(*_decompose_decimal_seconds(minutes * SECONDS_PER_MINUTE))
@staticmethod
def from_hours(hours: float) -> 'TimeSpan':
""" Factory method to create a TimeSpan from a number of hours.
"""
return TimeSpan(*_decompose_decimal_seconds(hours * SECONDS_PER_HOUR))
@staticmethod
def from_days(days: float) -> 'TimeSpan':
""" Factory method to create a TimeSpan from a number of mean solar days.
"""
return TimeSpan(*_decompose_decimal_seconds(days * SECONDS_PER_SOLAR_DAY))
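# Commented sketch of the factory behaviour (assumes _decompose_decimal_seconds
# splits a float into (whole_seconds, nano_seconds), which is what its name and
# the TimeSpan(whole_seconds, nano_seconds) constructor below suggest):
#   TimeSpan.from_seconds(1.25)   # -> TimeSpan(1, 250000000)
#   TimeSpan.from_minutes(0.5)    # -> TimeSpan(30, 0)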
def __init__(self, whole_seconds: int, nano_seconds: int):
def normalize_time(ws: int, ns: int) -> Tuple[int, int]:
""" Function for normalizing whole vs sub-second digits. """
ws += (copysign(1,ns) * 1)
ns -= (copysign(1,ns) * NANOSECONDS_PER_SECOND)
return ws, ns
self._whole_seconds = None
self._nano_seconds = None
if (whole_seconds is not None) and (nano_seconds is not None):
while abs(nano_seconds) >= NANOSECONDS_PER_SECOND:
whole_seconds, nano_seconds = normalize_time(whole_seconds, nano_seconds)
# vis_utils/animation/skeleton_animation_controller.py
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import json
from copy import deepcopy
from PySignal import Signal
from .animation_controller import AnimationController, CONTROLLER_TYPE_ANIMATION
from .skeleton_visualization import SkeletonVisualization, SKELETON_DRAW_MODE_NONE, SKELETON_DRAW_MODE_LINES, SKELETON_DRAW_MODE_BOXES, SKELETON_DRAW_MODE_CS
from .point_cloud_animation_controller import PointCloudAnimationController
from vis_utils.scene.components import ComponentBase
from vis_utils.io import load_model_from_fbx_file, load_json_file
from vis_utils.scene.utils import get_random_color
from anim_utils.animation_data import BVHReader, BVHWriter, MotionVector, parse_amc_file
from anim_utils.retargeting import retarget_from_src_to_target, retarget_from_point_cloud_to_target
from vis_utils.io.fbx_io import export_motion_vector_to_fbx_file
from anim_utils.animation_data.motion_state import MotionState
from .skeleton_mirror_component import SkeletonMirrorComponent
class SkeletonAnimationControllerBase(ComponentBase):
updated_animation_frame = Signal()
reached_end_of_animation = Signal()
update_scene_object = Signal()
def __init__(self, scene_object):
ComponentBase.__init__(self, scene_object)
class LegacySkeletonAnimationController(SkeletonAnimationControllerBase, AnimationController):
def __init__(self, scene_object):
SkeletonAnimationControllerBase.__init__(self, scene_object)
AnimationController.__init__(self)
self._motion = None
def get_semantic_annotation(self):
return None
def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
if self.isLoadedCorrectly():
self._visualization.draw(modelMatrix, viewMatrix, projectionMatrix, lightSources)
def update(self, dt):
""" update current frame and global joint transformation matrices
"""
if not self.isLoadedCorrectly():
return
dt *= self.animationSpeed
if self.playAnimation:
self.animationTime += dt
self.currentFrameNumber = int(self.animationTime / self.getFrameTime())
self.updateTransformation()
# update gui
if self.currentFrameNumber > self.getNumberOfFrames():
self.resetAnimationTime()
self.reached_end_of_animation.emit(self.loopAnimation)
else:
self.updated_animation_frame.emit(self.currentFrameNumber)
def isLoadedCorrectly(self):
return self._motion is not None
def updateTransformation(self):
if 0 <= self.currentFrameNumber < self.getNumberOfFrames():
current_frame = self._motion.frames[self.currentFrameNumber]
self._visualization.updateTransformation(current_frame, self.scene_object.transformation)
def updateTransformationFromFrame(self, frame):
self._visualization.updateTransformation(frame, self.scene_object.transformation)
def resetAnimationTime(self):
self.currentFrameNumber = 0
self.animationTime = 0
self.updateTransformation()
def setCurrentFrameNumber(self, frame_idx):
self.currentFrameNumber = frame_idx
self.animationTime = self.getFrameTime() * frame_idx
self.updateTransformation()
def getNumberOfFrames(self):
return self._motion.n_frames
def getFrameTime(self):
if self.isLoadedCorrectly():
return self._motion.frame_time
else:
return 0
def toggle_animation_loop(self):
self.loopAnimation = not self.loopAnimation
class SkeletonAnimationController(SkeletonAnimationControllerBase):
""" The class controls the pose of a skeleton based on an instance of a MotionState class.
The scene containing a controller connects to signals emitted by an instance of the class and relays them to the GUI.
"""
def __init__(self, scene_object):
SkeletonAnimationControllerBase.__init__(self, scene_object)
self.loadedCorrectly = False
self.hasVisualization = False
self.filePath = ""
self.name = ""
self._visualization = None
self._motion = None
self.markers = dict()
self.recorder = None
self.relative_root = False
self.root_pos = None
self.root_q = None
self.type = CONTROLLER_TYPE_ANIMATION
self.animationSpeed = 1.0
self.loopAnimation = False
self.activate_emit = True
self.visualize = True
def set_skeleton(self, skeleton, visualize=True):
self.visualize = visualize
if visualize:
self._visualization.set_skeleton(skeleton, visualize)
def set_motion(self, motion):
self._motion = MotionState(motion)
def set_color_annotation(self, semantic_annotation, color_map):
self._motion.set_color_annotation(semantic_annotation, color_map)
def set_time_function(self, time_function):
self._motion.set_time_function(time_function)
def set_color_annotation_legacy(self, annotation, color_map):
self._motion.set_color_annotation_legacy(annotation, color_map)
def set_random_color_annotation(self):
self._motion.set_random_color_annotation()
def set_visualization(self, visualization, draw_mode=SKELETON_DRAW_MODE_BOXES):
self._visualization = visualization
self._visualization.draw_mode = draw_mode
self._visualization.updateTransformation(self._motion.get_pose(), self.scene_object.scale_matrix)
def update(self, dt):
""" update current frame and global joint transformation matrices
"""
if not self.isLoadedCorrectly():
return
reset = self._motion.update(dt*self.animationSpeed)
if self._motion.play:
self.updateTransformation()
# update gui
if reset:
self.reached_end_of_animation.emit(self.loopAnimation)
self._motion.play = self.loopAnimation
else:
if self.activate_emit:
self.updated_animation_frame.emit(self._motion.get_current_frame_idx())
def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
if self.isLoadedCorrectly():
self._visualization.draw(modelMatrix, viewMatrix, projectionMatrix, lightSources)
def updateTransformation(self):
if self.relative_root:
return
self.set_transformation_from_frame(self._motion.get_pose())
def set_transformation_from_frame(self, frame):
if frame is None:
return
self._visualization.updateTransformation(frame, self.scene_object.scale_matrix)
#self.update_markers()
self.updateAnnotation()
def updateAnnotation(self):
if self._motion.get_current_frame_idx() < self._motion.get_n_annotations():
current_annotation = self._motion.get_current_annotation()
self._visualization.set_color(current_annotation["color"])
def get_current_annotation_label(self):
return self._motion.get_current_annotation_label()
def resetAnimationTime(self):
self._motion.reset()
self.updateTransformation()
def setCurrentFrameNumber(self, frame_idx):
self._motion.set_frame_idx(frame_idx)
self.updateTransformation()
#self.update_markers()
def getNumberOfFrames(self):
return self._motion.get_n_frames()
def isLoadedCorrectly(self):
return self._motion is not None
def getFrameTime(self):
if self.isLoadedCorrectly():
# print self.frameTime
return self._motion.get_frame_time()
else:
return 0
def getScaleFactor(self):
if self.isLoadedCorrectly():
return self.scaleFactor
else:
return -1
def getFilePath(self):
if self.isLoadedCorrectly():
return self.filePath
def getNumberOfJoints(self):
return self._visualization.skeleton.get_n_joints()
def setColor(self, color):
print("set color", color)
self._visualization.set_color(color)
def getColor(self):
return self._visualization.color
def getPosition(self):
m = self.scene_object.transformation
if self._motion is not None:
root = self._visualization.skeleton.root
pos = self._visualization.skeleton.nodes[root].offset + self._motion.get_pose()[:3]
pos = [pos[0], pos[1], pos[2], 1]
pos = np.dot(m, pos)[:3]
return np.array(pos)
else:
return m[3,:3]
def get_visualization(self):
return self._visualization
def create_ragdoll(self, figure_def, use_reference_frame=True, create_markers=True):
if self._motion is not None and self._visualization.skeleton.skeleton_model is not None:
frame = self._motion.get_pose()
skeleton = self._visualization.skeleton
if use_reference_frame:
frame = skeleton.get_reduced_reference_frame()
o = self.scene_object.scene.object_builder.create_component("ragdoll_from_skeleton", skeleton, frame, figure_def, add_contact_vis=False)
#o = self.scene_object.scene.object_builder.create_ragdoll_from_skeleton(self._visualization.skeleton, frame)
self.scene_object.scene.addAnimationController(o, "character_animation_recorder")
self.recorder = o._components["character_animation_recorder"]
if create_markers:
self.create_markers(figure_def)
def create_markers(self, figure_def, scale=1.0):
if self.recorder is not None:
markers = self.recorder.generate_constraint_markers_v9(self, scale, figure_def)
self.attach_constraint_markers(markers)
def attach_constraint_markers(self, markers):
self.markers = markers
def detach_constraint_markers(self):
self.markers = dict()
def update_markers(self):
frame = self._motion.get_pose()
scale = self.scene_object.scale_matrix[0][0]
for joint in list(self.markers.keys()):
for marker in self.markers[joint]:
m = self._visualization.skeleton.nodes[joint].get_global_matrix(frame, True)
position = np.dot(m, marker["relative_trans"])[:3, 3]
marker["object"].setPosition(position*scale)
def toggle_animation_loop(self):
self.loopAnimation = not self.loopAnimation
def get_bvh_string(self):
skeleton = self._visualization.skeleton
print("generate bvh string", len(skeleton.animated_joints))
frames = self._motion.get_frames()
frames = skeleton.add_fixed_joint_parameters_to_motion(frames)
frame_time = self._motion.get_frame_time()
bvh_writer = BVHWriter(None, skeleton, frames, frame_time, True)
return bvh_writer.generate_bvh_string()
def get_json_data(self):
self._motion.mv.skeleton = self._visualization.skeleton
return self._motion.mv.to_db_format()
def export_to_file(self, filename, export_format="bvh", frame_range=None):
if self._motion is not None:
frame_time = self._motion.get_frame_time()
if export_format == "bvh":
skeleton = self._visualization.skeleton
frames = self._motion.get_frames()
frames = np.array(frames)
if frames is not None:
print("frames shape", frames.shape)
else:
print("frames is none")
print("ref framee length",skeleton.reference_frame_length)
joint_count = 0
for joint_name in skeleton.nodes.keys():
if len(skeleton.nodes[joint_name].children) > 0 and "EndSite" not in joint_name:
joint_count+=1
skeleton.reference_frame_length = joint_count * 4 + 3
frames = skeleton.add_fixed_joint_parameters_to_motion(frames)
if frame_range is not None:
bvh_writer = BVHWriter(None, skeleton, frames[frame_range[0]:frame_range[1],:], frame_time, True)
else:
bvh_writer = BVHWriter(None, skeleton, frames, frame_time, True)
bvh_writer.write(filename)
elif export_format == "fbx":
export_motion_vector_to_fbx_file(self._visualization.skeleton,
self._motion, filename)
elif export_format == "json":
self._visualization.skeleton.save_to_json(filename)
else:
print("unsupported format", export_format)
def retarget_from_src(self, src_controller, scale_factor=1.0, src_model=None, target_model=None, frame_range=None):
target_skeleton = self._visualization.skeleton
frame_time = src_controller.get_frame_time()
if target_model is not None:
target_skeleton.skeleton_model = target_model
new_frames = None
if type(src_controller) == SkeletonAnimationController:
src_skeleton = src_controller._visualization.skeleton
src_frames = src_controller._motion.get_frames()
if src_model is not None:
src_skeleton.skeleton_model = src_model
if src_skeleton.identity_frame is None or target_skeleton.identity_frame is None:
raise Exception("Error identiframe is None")
new_frames = retarget_from_src_to_target(src_skeleton, target_skeleton, src_frames, scale_factor=scale_factor, frame_range=frame_range)
elif type(src_controller) == PointCloudAnimationController:
src_joints = src_controller._joints
src_frames = src_controller._animated_points
if src_model is None:
src_model = src_controller.skeleton_model
new_frames = retarget_from_point_cloud_to_target(src_joints, src_model, target_skeleton, src_frames, scale_factor=scale_factor, frame_range=frame_range)
if new_frames is not None:
self._motion.mv.frames = new_frames
self._motion.mv.n_frames = len(new_frames)
self._motion.frame_idx = 0
self._motion.mv.frame_time = frame_time
self.currentFrameNumber = 0
self.updateTransformation()
self.update_scene_object.emit(-1)
self.updated_animation_frame.emit(self.currentFrameNumber)
print("finished retargeting", self._motion.get_n_frames(), "frames")
return self._motion.get_n_frames()
def retarget_from_frames(self, src_skeleton, src_frames, scale_factor=1.0, target_model=None, frame_range=None, place_on_ground=False, joint_filter=None):
target_skeleton = self._visualization.skeleton
if target_model is not None:
target_skeleton.skeleton_model = target_model
new_frames = retarget_from_src_to_target(src_skeleton, target_skeleton, src_frames,
scale_factor=scale_factor, frame_range=frame_range, place_on_ground=place_on_ground, joint_filter=joint_filter)
if new_frames is not None:
self._motion.mv.frames = new_frames
self._motion.mv.n_frames = len(new_frames)
print("finished retargeting", self._motion.get_n_frames(), "frames")
return self._motion.get_n_frames()
def set_scale(self, scale_factor):
#self._visualization.set_scale(scale_factor)
color = self._visualization.color
#self._motion.mv.frames[:,:3] *= scale_factor
skeleton = self._visualization.skeleton
skeleton.scale(scale_factor)
self._motion.mv.scale_root(scale_factor)
self._visualization = SkeletonVisualization(self.scene_object, color)
self._visualization.set_skeleton(skeleton)
self.updateTransformation()
self.scene_object.transformation = np.eye(4)
def load_annotation(self, filename):
with open(filename, "r") as in_file:
annotation_data = json.load(in_file)
semantic_annotation = annotation_data["semantic_annotation"]
color_map = annotation_data["color_map"]
self.set_color_annotation(semantic_annotation, color_map)
def save_annotation(self, filename):
with open(filename, "w") as out_file:
data = dict()
data["semantic_annotation"] = self._motion._semantic_annotation
data["color_map"] = self._motion.label_color_map
json.dump(data, out_file)
def plot_joint_trajectories(self, joint_list):
joint_objects = []
for j in joint_list:
o = self.plot_joint_trajectory(j)
if o is not None:
joint_objects.append(o)
return joint_objects
def plot_joint_trajectory(self, joint_name):
scene_object = None
if joint_name in list(self._visualization.skeleton.nodes.keys()):
trajectory = list()
for f in self._motion.get_frames():
p = self.get_joint_position(joint_name, f)
if p is not None:
trajectory.append(p)
if len(trajectory) > 0:
name = self.scene_object.name + "_" + joint_name + "_trajectory"
scene_object = self.scene_object.scene.addSplineObject(name, trajectory, get_random_color(), granularity=500)
else:
print("No points to plot for joint", joint_name)
return scene_object
def get_joint_position(self, joint_name, frame):
if joint_name in self._visualization.skeleton.nodes.keys():
return self._visualization.skeleton.nodes[joint_name].get_global_position(frame)
else:
return None
def get_skeleton_copy(self):
return deepcopy(self._visualization.skeleton)
def get_motion_vector_copy(self, start_frame=0, end_frame=-1):
mv_copy = MotionVector()
if end_frame > 0:
mv_copy.frames = deepcopy(self._motion.mv.frames[start_frame: end_frame])
else:
mv_copy.frames = np.array(self._motion.mv.frames)
mv_copy.n_frames = len(mv_copy.frames)
mv_copy.frame_time = self._motion.mv.frame_time
return mv_copy
def get_current_frame(self):
return self._motion.get_pose()
# Save the official application info. They will be
# persisted in the next status update
app.regenerate_application_info(name, version, patches)
if not cutils.verify_checksum(app.inst_path):
_handle_extract_failure('checksum validation failed.')
mname, mfile = self._utils._find_manifest_file(app.inst_path)
# Save the official manifest file info. They will be persisted
# in the next status update
app.regenerate_manifest_filename(mname, os.path.basename(mfile))
else:
name, version, patches = cutils.find_metadata_file(
app.inst_path, constants.APP_METADATA_FILE)
app.patch_dependencies = patches
self._utils._extract_helm_charts(app.inst_path)
except exception.SysinvException as e:
_handle_extract_failure(str(e))
except OSError as e:
LOG.error(e)
_handle_extract_failure()
finally:
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid, orig_gid)
def get_image_tags_by_charts(self, app_images_file, app_manifest_file, overrides_dir):
""" Mine the image tags for charts from the images file. Add the
image tags to the manifest file if the image tags from the
charts do not exist in the manifest file. Convert the image
tags in both the override files and the manifest file. Intended
for both system and custom apps.
The image tagging conversion(local docker registry address prepended):
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(e.g. registry.local:9001/docker.io/mariadb:10.2.13)
"""
app_imgs = []
manifest_update_required = False
if os.path.exists(app_images_file):
with io.open(app_images_file, 'r', encoding='utf-8') as f:
images_file = yaml.safe_load(f)
if os.path.exists(app_manifest_file):
with io.open(app_manifest_file, 'r', encoding='utf-8') as f:
# The RoundTripLoader removes the superfluous quotes by default,
# resulting in the dumped-out charts not being readable by Armada.
# Set preserve_quotes=True to preserve all the quotes.
charts = list(yaml.load_all(
f, Loader=yaml.RoundTripLoader, preserve_quotes=True))
for chart in charts:
if "armada/Chart/" in chart['schema']:
chart_data = chart['data']
chart_name = chart_data['chart_name']
chart_namespace = chart_data['namespace']
# Get the image tags by chart from the images file
helm_chart_imgs = {}
if chart_name in images_file:
helm_chart_imgs = images_file[chart_name]
# Get the image tags from the chart overrides file
overrides = chart_namespace + '-' + chart_name + '.yaml'
app_overrides_file = os.path.join(overrides_dir, overrides)
overrides_file = {}
if os.path.exists(app_overrides_file):
with io.open(app_overrides_file, 'r', encoding='utf-8') as f:
overrides_file = yaml.safe_load(f)
override_imgs = self._image.find_images_in_dict(
overrides_file.get('data', {}).get('values', {}))
override_imgs_copy = copy.deepcopy(override_imgs)
# Get the image tags from the armada manifest file
armada_chart_imgs = self._image.find_images_in_dict(
chart_data.get('values', {}))
armada_chart_imgs_copy = copy.deepcopy(armada_chart_imgs)
armada_chart_imgs = self._image.merge_dict(helm_chart_imgs, armada_chart_imgs)
# Update image tags with local registry prefix
override_imgs = self._image.update_images_with_local_registry(override_imgs)
armada_chart_imgs = self._image.update_images_with_local_registry(armada_chart_imgs)
# Generate a list of required images by chart
download_imgs = copy.deepcopy(armada_chart_imgs)
download_imgs = self._image.merge_dict(download_imgs, override_imgs)
download_imgs_list = self._image.generate_download_images_list(download_imgs, [])
app_imgs.extend(download_imgs_list)
# Update chart override file if needed
if override_imgs != override_imgs_copy:
with open(app_overrides_file, 'w') as f:
try:
overrides_file['data']['values'] = self._image.merge_dict(
overrides_file['data']['values'], override_imgs)
yaml.safe_dump(overrides_file, f, default_flow_style=False)
LOG.info("Overrides file %s updated with new image tags" %
app_overrides_file)
except (TypeError, KeyError):
LOG.error("Overrides file %s failed to update" %
app_overrides_file)
# Update armada chart if needed
if armada_chart_imgs != armada_chart_imgs_copy:
# This is to convert an empty OrderedDict to a dict
if 'values' in chart_data:
if not chart_data['values']:
chart_data['values'] = {}
chart_data['values'] = self._image.merge_dict(
chart_data.get('values', {}), armada_chart_imgs)
manifest_update_required = True
# Update manifest file if needed
if manifest_update_required:
with open(app_manifest_file, 'w') as f:
try:
yaml.dump_all(charts, f, Dumper=yaml.RoundTripDumper,
explicit_start=True, default_flow_style=False)
LOG.info("Manifest file %s updated with new image tags" %
app_manifest_file)
except Exception as e:
LOG.error("Manifest file %s failed to update with "
"new image tags: %s" % (app_manifest_file, e))
return list(set(app_imgs))
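# Illustrative note (not part of the original code): the conversion performed
# above prepends the local docker registry address to every image reference,
# e.g. (example values only):
#
#   'docker.io/mariadb:10.2.13'  ->  'registry.local:9001/docker.io/mariadb:10.2.13'
#
# so that both the chart overrides and the armada manifest pull images through
# the local registry instead of reaching upstream registries directly.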
def _register_embedded_images(self, app):
"""
TODO(tngo): When we're ready to support air-gap scenario and private
images, the following need to be done:
a. load the embedded images
b. tag and push them to the docker registry on the controller
c. find image tag IDs in each chart and replace their values with
new tags. Alternatively, document the image tagging convention
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(e.g. registry.local:9001/prom/mysqld-exporter)
to be referenced in the application Helm charts.
"""
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason="embedded images are not yet supported.")
def _save_images_list(self, app):
# Extract the list of images from the charts and overrides where
# applicable. Save the list to the same location as the armada manifest
# so it can be sync'ed.
app.charts = self._get_list_of_charts(app.sync_armada_mfile)
self._plugins.activate_plugins(app)
LOG.info("Generating application overrides to discover required images.")
self._helm.generate_helm_application_overrides(
app.sync_overrides_dir, app.name, mode=None, cnamespace=None,
armada_format=True, armada_chart_info=app.charts, combined=True)
self._plugins.deactivate_plugins(app)
self._save_images_list_by_charts(app)
# Get the list of images from the updated images overrides
images_to_download = self.get_image_tags_by_charts(
app.sync_imgfile, app.sync_armada_mfile, app.sync_overrides_dir)
if not images_to_download:
# TODO(tngo): In the future we may want to support the deployment of apps
# that only set up resources (no images). In that case, generate
# an info log and let the operation advance to the next step.
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason="charts specify no docker images.")
with open(app.sync_imgfile, 'a') as f:
yaml.safe_dump({"download_images": images_to_download}, f,
default_flow_style=False)
def _save_images_list_by_charts(self, app):
from six.moves.urllib.parse import urlparse
# Mine the images from values.yaml files in the charts directory.
# The list of images for each chart is saved to the images file.
images_by_charts = {}
for chart in app.charts:
chart_name = os.path.join(app.inst_charts_dir, chart.name)
if not os.path.exists(chart_name):
# If the helm chart name is not the same as the armada
# chart name in the manifest, try using the source
# to find the chart directory.
try:
# helm charts should be of the standard format:
# <chartname>-X.X.X.tgz
url_path = os.path.basename(urlparse(chart.location).path)
# strip the .tgz
chart_and_version = re.sub(r'\.tgz$', '', url_path)
# strip the version
chart_name_no_version = re.sub(r'-(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)',
'', chart_and_version)
chart_name = os.path.join(app.inst_charts_dir, chart_name_no_version)
except Exception as e:
LOG.info("Cannot parse chart path: %s" % e)
pass
chart_path = os.path.join(chart_name, 'values.yaml')
if os.path.exists(chart_path):
with io.open(chart_path, 'r', encoding='utf-8') as f:
y = yaml.safe_load(f)
chart_images = self._image.find_images_in_dict(y)
if chart_images:
images_by_charts.update({chart.name: chart_images})
with open(app.sync_imgfile, 'w') as f:
yaml.safe_dump(images_by_charts, f, explicit_start=True,
default_flow_style=False)
def _retrieve_images_list(self, app_images_file):
with io.open(app_images_file, 'r', encoding='utf-8') as f:
images_list = yaml.safe_load(f)
return images_list
def download_images(self, app):
if os.path.isdir(app.inst_images_dir):
return self._register_embedded_images(app)
if app.system_app:
# Some images could have been overwritten via user overrides
# between upload and apply, or between applies. Refresh the
# saved images list.
saved_images_list = self._retrieve_images_list(app.sync_imgfile)
saved_download_images_list = list(saved_images_list.get("download_images"))
images_to_download = self.get_image_tags_by_charts(
app.sync_imgfile, app.sync_armada_mfile, app.sync_overrides_dir)
if set(saved_download_images_list) != set(images_to_download):
saved_images_list.update({"download_images": images_to_download})
with open(app.sync_imgfile, 'w') as f:
yaml.safe_dump(saved_images_list, f, explicit_start=True,
default_flow_style=False)
else:
images_to_download = self._retrieve_images_list(
app.sync_imgfile).get("download_images")
total_count = len(images_to_download)
threads = min(MAX_DOWNLOAD_THREAD, total_count)
start = time.time()
try:
registries_info = self._docker.retrieve_specified_registries()
except Exception as e:
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason=str(e))
for idx in reversed(range(MAX_DOWNLOAD_ATTEMPTS)):
pool = greenpool.GreenPool(size=threads)
for tag, success in pool.imap(
functools.partial(self._docker.download_an_image,
app.name,
registries_info),
images_to_download):
if success:
continue
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason="operation aborted by user.")
else:
LOG.info("Failed to download image: %s", tag)
break
else:
elapsed = time.time() - start
LOG.info("All docker images for application %s were successfully "
"downloaded in %d seconds", app.name, elapsed)
break
# don't sleep after last download attempt
if idx:
LOG.info("Retry docker images download for application %s "
"after %d seconds", app.name, DOWNLOAD_WAIT_BEFORE_RETRY)
time.sleep(DOWNLOAD_WAIT_BEFORE_RETRY)
else:
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason=constants.APP_PROGRESS_IMAGES_DOWNLOAD_FAILED)
def _validate_helm_charts(self, app):
failed_charts = []
for r, f in cutils.get_files_matching(app.inst_charts_dir, 'Chart.yaml'):
# Eliminate redundant validation for system app
if app.system_app and '/charts/helm-toolkit' in r:
continue
try:
output = subprocess.check_output( # pylint: disable=not-callable
['helm', 'lint', r], universal_newlines=True)
if "linted, 0 chart(s) failed" in output:
LOG.info("Helm chart %s validated" % os.path.basename(r))
else:
LOG.error("Validation failed for helm chart %s" %
os.path.basename(r))
failed_charts.append(r)
except Exception as e:
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason=str(e))
if len(failed_charts) > 0:
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason="one or more charts failed validation.")
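# For reference (illustrative, based on the check above): a successful
# `helm lint <chart-dir>` run prints a summary line such as
#   "1 chart(s) linted, 0 chart(s) failed"
# which is the substring the validation above looks for.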
def _get_chart_data_from_metadata(self, app):
"""Get chart related data from application metadata
This extracts the helm repo from the application metadata where the
chart should be loaded.
This also returns the list of charts that are disabled by default.
:param app: application
"""
repo = common.HELM_REPO_FOR_APPS
disabled_charts = []
lfile = os.path.join(app.inst_path, constants.APP_METADATA_FILE)
if os.path.exists(lfile) and os.path.getsize(lfile) > 0:
with io.open(lfile, 'r', encoding='utf-8') as f:
try:
y = yaml.safe_load(f)
repo = y.get('helm_repo', common.HELM_REPO_FOR_APPS)
disabled_charts = y.get('disabled_charts', [])
except KeyError:
pass
LOG.info("Application %s (%s) will load charts to chart repo %s" % (
app.name, app.version, repo))
LOG.info("Application %s (%s) will disable charts %s by default" % (
app.name, app.version, disabled_charts))
return (repo, disabled_charts)
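# Illustrative example (hypothetical values) of the application metadata file
# parsed above; only the two keys read here are shown:
#
#   helm_repo: stx-platform
#   disabled_charts:
#   - chart-a
#   - chart-b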
def _upload_helm_charts(self, app):
# Set env path for helm-upload execution
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
charts = [os.path.join(r, f)
for r, f in cutils.get_files_matching(app.inst_charts_dir, '.tgz')]
orig_uid, orig_gid = get_app_install_root_path_ownership()
(helm_repo, disabled_charts) = self._get_chart_data_from_metadata(app)
try:
# Temporarily change /scratch group ownership to sys_protected
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid,
grp.getgrnam(constants.SYSINV_SYSADMIN_GRPNAME).gr_gid)
with open(os.devnull, "w") as fnull:
for chart
u0 {3,D}
7 C u0 {4,D}
""",
thermo = u'Cds-(Cdd-O2d)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cd)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 S2d u0 {2,D}
6 C u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cds)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 S2d u0 {2,D}
6 Cd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 S2d u0 {2,D}
6 Cdd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd-S2d)Cb",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 Cb u0 {1,S}
6 S2d u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 Cb u0 {1,S}
6 S2d u0 {3,D}
7 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 318,
label = "Cds-(Cdd-Cd)(Cds-Cd)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 C u0 {2,D}
6 C u0 {3,D}
""",
thermo = u'Cds-(Cdd-Cd)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 319,
label = "Cds-(Cdd-Cd)(Cds-Cds)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 C u0 {2,D}
6 Cd u0 {3,D}
""",
thermo = u'Cds-Cds(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 320,
label = "Cds-(Cdd-Cd)(Cds-Cdd)Cb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 C u0 {2,D}
6 Cdd u0 {3,D}
""",
thermo = u'Cds-(Cdd-Cd)(Cds-Cdd-Cd)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 321,
label = "Cds-(Cdd-Cd)(Cds-Cdd-O2d)Cb",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 Cb u0 {1,S}
6 C u0 {3,D}
7 O2d u0 {4,D}
""",
thermo = u'Cds-Cds(Cds-Cdd-O2d)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cdd-S2d)Cb",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 Cb u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 322,
label = "Cds-(Cdd-Cd)(Cds-Cdd-Cd)Cb",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 Cb u0 {1,S}
6 C u0 {3,D}
7 C u0 {4,D}
""",
thermo = u'Cds-(Cdd-Cd)(Cds-Cds)Cb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 323,
label = "Cds-CddCbCt",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
""",
thermo = u'Cds-(Cdd-Cd)CbCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 324,
label = "Cds-(Cdd-O2d)CbCt",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 O2d u0 {2,D}
""",
thermo = u'Cds-(Cdd-O2d)(Cds-Cds)Ct',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)CbCt",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 325,
label = "Cds-(Cdd-Cd)CbCt",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 C u0 {2,D}
""",
thermo = u'Cds-CdsCbCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 326,
label = "Cds-CddCbCb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
""",
thermo = u'Cds-(Cdd-Cd)CbCb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 327,
label = "Cds-(Cdd-O2d)CbCb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 O2d u0 {2,D}
""",
thermo = u'Cds-(Cdd-O2d)(Cds-Cds)(Cds-Cds)',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)CbCb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 328,
label = "Cds-(Cdd-Cd)CbCb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 C u0 {2,D}
""",
thermo = u'Cds-CdsCbCb',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CdsC=SC=S",
group =
"""
1 * Cd u0 {2,S} {3,S} {4,D}
2 CS u0 {1,S} {5,D}
3 CS u0 {1,S} {6,D}
4 Cd u0 {1,D}
5 S2d u0 {2,D}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=S(Cds-Cd)",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {7,D}
4 Cd u0 {1,S} {6,D}
5 C u0 {2,D}
6 C u0 {4,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=S(Cds-Cds)",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {7,D}
4 Cd u0 {1,S} {6,D}
5 C u0 {2,D}
6 Cd u0 {4,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=S(Cds-Cdd)",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {7,D}
4 Cd u0 {1,S} {6,D}
5 C u0 {2,D}
6 Cdd u0 {4,D}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=S(Cds-Cdd-Cd)",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cdd u0 {1,D} {6,D}
4 CS u0 {1,S} {7,D}
5 Cdd u0 {2,D} {8,D}
6 C u0 {3,D}
7 S2d u0 {4,D}
8 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=S(Cds-Cdd-S2d)",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cdd u0 {1,D} {6,D}
4 CS u0 {1,S} {7,D}
5 Cdd u0 {2,D} {8,D}
6 C u0 {3,D}
7 S2d u0 {4,D}
8 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)C=SCs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {6,D}
4 Cs u0 {1,S}
5 S2d u0 {2,D}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)C=SCt",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {6,D}
4 Ct u0 {1,S}
5 S2d u0 {2,D}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)C=SCb",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 S2d u0 {2,D}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)C=SC=S",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {6,D}
4 CS u0 {1,S} {7,D}
5 C u0 {2,D}
6 S2d u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cd)C=S",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 CS u0 {1,S} {7,D}
5 S2d u0 {2,D}
6 C u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cds)C=S",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd
import os
import logging
import functools
from typing import Dict, List
from sqlalchemy import create_engine, inspect, Table
from sqlalchemy.schema import CreateSchema
from snowflake.sqlalchemy import URL, TIMESTAMP_NTZ
from snowflake.connector.errors import ProgrammingError
from snowflake.connector.network import ReauthenticationRequest
from target_snowflake.utils.error import SchemaUpdateError
from target_snowflake.utils.snowflake_helpers import get_reserved_keywords
# Don't show all the info log messages from Snowflake
for logger_name in ["snowflake.connector", "botocore", "boto3"]:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.WARNING)
# Map sqlalchemy types to Snowflake Types
# Required for two reasons:
# 1. Compare the sqlalchemy Table definition to what is defined in Snowflake
# 2. Use the type to manually execute an ALTER TABLE for updating or
# adding new columns
MAP_SQLALCHEMY_TO_SNOWFLAKE_TYPE = {
"BIGINT": "DECIMAL(38, 0)",
"FLOAT": "FLOAT",
"VARCHAR": "VARCHAR(16777216)",
"BOOLEAN": "BOOLEAN",
"TIMESTAMP": "TIMESTAMP_NTZ",
}
# Type updates allowed.
# There is a restriction in Snowflake that severely limits the possible transitions:
# https://docs.snowflake.net/manuals/sql-reference/sql/alter-table-column.html
# When setting the TYPE for a column, the specified type (i.e. type)
# must be a text data type (VARCHAR, STRING, TEXT, etc.).
# Also, TYPE can be used only to increase the length of a text column.
# That means that no INT --> FLOAT or INT --> STRING, etc type upgrades
# are allowed by Snowflake.
# Together with the fact that sqlalchemy uses the maximum type length,
# only the following 2 type upgrades are valid (which never occur
# but are added for completeness and in order to support updates in
# SnowflakeLoader for future proofing):
ALLOWED_TYPE_TRANSITIONS = [
("VARCHAR(16777216)", "STRING"),
("VARCHAR(16777216)", "TEXT"),
]
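# Minimal illustration (not part of the original code) of how a proposed
# column type transition is checked against the allow-list above:
#
#   ("VARCHAR(16777216)", "TEXT") in ALLOWED_TYPE_TRANSITIONS   # True  -> column is altered
#   ("FLOAT", "DECIMAL(38, 0)")   in ALLOWED_TYPE_TRANSITIONS   # False -> SchemaUpdateError is raised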
# How many times are we going to try to run functions with
# @handle_token_expiration when they raise exceptions.
TokenExpirationMaxTries = 2
def handle_token_expiration(func):
"""
Wrap SnowflakeLoader methods in order to catch token expiration errors,
refresh the engine and retry.
If the session stays idle for 4 hours, then the master token that
snowflake.sqlalchemy has stored expires and a new session token can not be
automatically renewed.
In that case, the following exceptions are raised:
snowflake.connector.errors.ProgrammingError: 390114 (08001)
snowflake.connector.network.ReauthenticationRequest: 390114 (08001)
Authentication token has expired. The user must authenticate again.
We only retry once:
The first try is the normal execution that will fail if 4 hours have passed
since the last query.
The second try follows a refresh_engine() and should succeed.
If it fails again, then something else is wrong and we should stop the execution
and report the error.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
last_exception = None
for retry in range(TokenExpirationMaxTries):
try:
return func(self, *args, **kwargs)
except (ProgrammingError, ReauthenticationRequest) as exc:
if "390114" in str(exc):
last_exception = exc
self.refresh_engine()
else:
raise exc
# If we tried TokenExpirationMaxTries times and we keep on getting errors,
# just stop trying and raise the last exception caught
raise last_exception
return wrapper
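# Illustrative sketch (not part of the original module): the decorator above is
# intended for instance methods of objects that expose a refresh_engine()
# method, as SnowflakeLoader below does. The class and method names here are
# hypothetical and exist only to show the wiring.
class _ExampleTokenRefreshingClient:
    def refresh_engine(self):
        # A real client would dispose of and recreate its Snowflake engine here.
        pass

    @handle_token_expiration
    def run_statement(self, stmt):
        # A real client would execute `stmt` against Snowflake; if the session
        # token expired, the decorator refreshes the engine and retries once.
        return stmt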
class SnowflakeEngineFactory:
def __init__(self, config: Dict) -> None:
# Keep the config in the EngineFactory in order to be able to refresh
# the engine if the master token expires
self._config = config
def create_engine(self):
return create_engine(
URL(
user=self._config["username"],
password=self._config["password"],
account=self._config["account"],
database=self._config["database"],
role=self._config["role"],
warehouse=self._config["warehouse"],
)
)
class SnowflakeLoader:
def __init__(self, table: Table, config: Dict) -> None:
self.table = table
# Add a schema to the provided sqlalchemy Table as it is agnostic
# on which schema we want to use (defined in config)
self.table.schema = config["schema"]
# Keep the database name and the role name as they are required
# for granting privileges to new entities.
self.database = config["database"]
self.role = config["role"]
# Create a SnowflakeEngineFactory with the provided config
# and use it to generate a new engine for connecting to Snowflake
self._engine_factory = SnowflakeEngineFactory(config)
self.engine = self._engine_factory.create_engine()
def refresh_engine(self) -> None:
if self.engine:
self.engine.dispose()
self.engine = self._engine_factory.create_engine()
def quoted_table_name(self) -> str:
"""
Get the FULL, quoted, table name with everything in caps.
e.g. "TEST_DB"."TARGET_SNOWFLAKE_TEST"."TEST_TABLE"
"""
return f'"{self.database}"."{self.table.schema}"."{self.table.name.upper()}"'
def attribute_names(self) -> List[str]:
"""
Get the attribute(column) names for the associated Table
"""
return [column.name for column in self.table.columns]
def empty_record(self) -> Dict:
"""
Get a dictionary representing an empty (all attributes None) record for
the table associated with this SnowflakeLoader instance.
Used as a template in order to normalize (map) all imported records to
the full schema they are defined for.
Important for records with multiple optional attributes that are not
always there, like, for example, multi-level JSON objects that are
flattened before being uploaded to Snowflake.
Guards against sqlalchemy errors for missing required values for
bind parameters.
"""
return dict.fromkeys(column.name for column in self.table.columns)
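# Illustrative sketch (hypothetical names, not part of the original code):
# normalizing a sparse incoming record against the full table schema before
# loading, so that every bind parameter is present:
#
#   record = {**loader.empty_record(), **{"id": 1, "name": "test"}}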
@handle_token_expiration
def schema_apply(self) -> None:
"""
Apply the schema defined for self.table to the Database we connect to
"""
grant_required = False
inspector = inspect(self.engine)
all_schema_names = inspector.get_schema_names()
if not (self.table.schema.lower() in all_schema_names):
logging.debug(f"Schema {self.table.schema} does not exist -> creating it ")
self.engine.execute(CreateSchema(self.table.schema))
grant_required = True
all_table_names = inspector.get_table_names(self.table.schema)
if not (self.table.name.lower() in all_table_names):
logging.debug(f"Table {self.table.name} does not exist -> creating it ")
self.table.create(self.engine)
grant_required = True
else:
# There is an existing Table: Check if a schema update is required
self.schema_update(inspector)
if grant_required:
self.grant_privileges(self.role)
def schema_update(self, inspector) -> None:
"""
Check if there is a schema diff between the new Table and the existing
one and if the changes can be supported, update the table with the diff.
Rules:
1. Only support type upgrades (e.g. STRING -> VARCHAR) for existing columns
2. If an unsupported type update is requested (e.g. float --> int),
raise a SchemaUpdateError exception.
3. Never drop columns, only update or add new ones
"""
existing_columns = {}
columns_to_add = []
columns_to_update = []
# Fetch the column definitions of the existing table and store them in a
# format useful for comparisons.
all_columns = inspector.get_columns(self.table.name, schema=self.table.schema)
for column in all_columns:
if isinstance(column["type"], TIMESTAMP_NTZ):
existing_columns[column["name"]] = "TIMESTAMP_NTZ"
else:
existing_columns[column["name"]] = f"{column['type']}"
# Check the new Table definition for new attributes or attributes
# with an updated data type
for column in self.table.columns:
if isinstance(column.type, TIMESTAMP_NTZ):
column_type = "TIMESTAMP_NTZ"
else:
column_type = MAP_SQLALCHEMY_TO_SNOWFLAKE_TYPE[f"{column.type}"]
if column.name not in existing_columns:
# A new column to be added to the table
columns_to_add.append((column.name, column_type))
elif column_type != existing_columns[column.name]:
# An existing column with a different data type
# Check if the update is allowed
transition = (existing_columns[column.name], column_type)
if transition not in ALLOWED_TYPE_TRANSITIONS:
raise SchemaUpdateError(
f"Not allowed type update for {self.table.name}.{column.name}: {transition}"
)
columns_to_update.append((column.name, column_type))
# If there are any columns to add or update, make the schema update
for (name, type) in columns_to_add:
self.add_column(name, type)
for (name, type) in columns_to_update:
self.update_column(name, type)
def add_column(self, col_name: str, col_data_type: str) -> None:
"""
Add the requested column to the Snowflake Table defined by self.table
"""
full_name = self.quoted_table_name()
alter_stmt = f"ALTER TABLE {full_name} ADD COLUMN {col_name} {col_data_type}"
logging.debug(f"Adding COLUMN {col_name} ({col_data_type}) to {full_name}")
with self.engine.connect() as connection:
connection.execute(alter_stmt)
def update_column(self, col_name: str, col_data_type: str) -> None:
"""
Update the requested column to the new type col_data_type
"""
full_name = self.quoted_table_name()
alter_stmt = f"ALTER TABLE {full_name} ALTER {col_name} TYPE {col_data_type}"
logging.debug(f"ALTERING COLUMN {full_name}.{col_name} to {col_data_type}")
with self.engine.connect() as connection:
connection.execute(alter_stmt)
@handle_token_expiration
def load(self, data: List[Dict]) -> None:
"""
Load the data provided as a list of dictionaries to the given Table
If there are Primary Keys defined, then we UPSERT them by loading
the data to a temporary table and then using Snowflake's MERGE operation
"""
if not data:
return
logging.debug(f"Loading data to Snowflake for {self.table.name}")
if self.table.primary_key:
# We have to use Snowflake's Merge in order to Upsert
# Create Temporary table to load the data to
tmp_table = self.create_tmp_table()
with self.engine.connect() as connection:
connection.execute(tmp_table.insert(), data)
# Merge Temporary Table into the Table we want to load data into
merge_stmt = self.generate_merge_stmt(tmp_table.name)
connection.execute(merge_stmt)
# Drop the Temporary Table
tmp_table.drop(self.engine)
else:
# Just Insert (append) as no conflicts can arise
with self.engine.connect() as connection:
connection.execute(self.table.insert(), data)
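# Illustrative sketch (hypothetical records, not part of the original code):
#
#   loader.load([{"id": 1, "name": "a"}, {"id": 2, "name": "b"}])
#
# If the table defines a primary key this upserts through a temporary table
# and a MERGE statement; otherwise it is a plain append-only insert.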
def create_tmp_table(self) -> Table:
"""
Create a temporary table in Snowflake based on the Table definition we
have stored in self.table.
"""
columns = [c.copy() for c in self.table.columns]
tmp_table = Table(
f"TMP_{self.table.name.upper()}",
self.table.metadata,
*columns,
schema=self.table.schema,
keep_existing=True,
)
tmp_table.drop(self.engine, checkfirst=True)
tmp_table.create(self.engine)
return tmp_table
def generate_merge_stmt(self, tmp_table_name: str) -> str:
"""
Generate a merge statement for merging a temporary table into the
main table.
The
1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m)
assert_allclose(m, expected)
# filter zero positions (max_exclude_percentage = percent exclude)
aln = make_aligned_seqs(
data={"1": "-CDE", "2": "A-DE", "3": "AC-E", "4": "ACD-"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m, max_exclude_percent=0.25)
assert_allclose(m, expected)
# filter zero positions (max_exclude_percentage too high)
aln = make_aligned_seqs(
data={"1": "-CDE", "2": "A-DE", "3": "AC-E", "4": "ACD-"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m, max_exclude_percent=0.5)
assert_allclose(m, expected)
# filter one position (default max_exclude_percentage)
aln = make_aligned_seqs(
data={"1": "-CDE", "2": "ACDE", "3": "ACDE", "4": "ACDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[DEFAULT_NULL_VALUE] * 4,
[DEFAULT_NULL_VALUE, 18.0, 5.0, 6.0],
[DEFAULT_NULL_VALUE, 1.0, 3.0, 2.0],
[DEFAULT_NULL_VALUE, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m)
assert_allclose(m, expected)
# filter one position (non-default max_exclude_percentage)
aln = make_aligned_seqs(
data={"1": "-CDE", "2": "ACDE", "3": "ACDE", "4": "-CDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[DEFAULT_NULL_VALUE] * 4,
[DEFAULT_NULL_VALUE, 18.0, 5.0, 6.0],
[DEFAULT_NULL_VALUE, 1.0, 3.0, 2.0],
[DEFAULT_NULL_VALUE, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m, max_exclude_percent=0.49)
assert_allclose(m, expected)
# filter all positions (default max_exclude_percentage)
aln = make_aligned_seqs(
data={"1": "----", "2": "ACDE", "3": "ACDE", "4": "ACDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array([[DEFAULT_NULL_VALUE] * 4] * 4)
filter_exclude_positions(aln, m)
assert_allclose(m, expected)
# filter all positions (non-default max_exclude_percentage)
aln = make_aligned_seqs(
data={"1": "----", "2": "A-DE", "3": "AC--", "4": "-CDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 3.0],
]
)
expected = array([[DEFAULT_NULL_VALUE] * 4] * 4)
filter_exclude_positions(aln, m, max_exclude_percent=0.49)
assert_allclose(m, expected)
# filter one position (default max_exclude_percentage,
# non-default excludes)
aln = make_aligned_seqs(
data={"1": "WCDE", "2": "ACDE", "3": "ACDE", "4": "ACDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[DEFAULT_NULL_VALUE] * 4,
[DEFAULT_NULL_VALUE, 18.0, 5.0, 6.0],
[DEFAULT_NULL_VALUE, 1.0, 3.0, 2.0],
[DEFAULT_NULL_VALUE, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m, excludes="W")
assert_allclose(m, expected)
# filter one position (default max_exclude_percentage,
# non-default null_value)
aln = make_aligned_seqs(
data={"1": "-CDE", "2": "ACDE", "3": "ACDE", "4": "ACDE"},
moltype=PROTEIN,
array_align=True,
)
m = array(
[
[1.0, 10.0, 4.0, 3.0],
[9.0, 18.0, 5.0, 6.0],
[4.0, 1.0, 3.0, 2.0],
[21.0, 0.0, 1.0, 33.0],
]
)
expected = array(
[
[999.0] * 4,
[999.0, 18.0, 5.0, 6.0],
[999.0, 1.0, 3.0, 2.0],
[999.0, 0.0, 1.0, 33.0],
]
)
filter_exclude_positions(aln, m, null_value=999.0)
assert_allclose(m, expected)
def test_filter_exclude_positions_intermolecular(self):
"""filter_exclude_positions: functions for intermolecular data"""
# these tests correspond to alignments of length 4 and 2 positions
# respectively, hence a coevolution_matrix with shape = (2,4)
# filter zero positions (no excludes)
merged_aln = make_aligned_seqs(
data={"1": "WCDEDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDEDE"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
filter_exclude_positions(merged_aln, m, intermolecular_data_only=True)
assert_allclose(m, expected)
# filter one position (aln1)
merged_aln = make_aligned_seqs(
data={"1": "WC-EDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDEDE"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array(
[[1.0, 10.0, DEFAULT_NULL_VALUE, 3.0], [9.0, 18.0, DEFAULT_NULL_VALUE, 6.0]]
)
filter_exclude_positions(merged_aln, m, intermolecular_data_only=True)
assert_allclose(m, expected)
# filter one position (aln2)
merged_aln = make_aligned_seqs(
data={"1": "WCEEDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDED-"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array([[1.0, 10.0, 4.0, 3.0], [DEFAULT_NULL_VALUE] * 4])
filter_exclude_positions(merged_aln, m, intermolecular_data_only=True)
assert_allclose(m, expected)
# filter two positions (aln1 & aln2)
merged_aln = make_aligned_seqs(
data={"1": "-CEEDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDED-"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array(
[[DEFAULT_NULL_VALUE, 10.0, 4.0, 3.0], [DEFAULT_NULL_VALUE] * 4]
)
filter_exclude_positions(merged_aln, m, intermolecular_data_only=True)
assert_allclose(m, expected)
# filter two positions (aln1 & aln2, alt excludes)
merged_aln = make_aligned_seqs(
data={"1": "WCEEDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDEDW"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array(
[[DEFAULT_NULL_VALUE, 10.0, 4.0, 3.0], [DEFAULT_NULL_VALUE] * 4]
)
filter_exclude_positions(
merged_aln, m, intermolecular_data_only=True, excludes="W"
)
assert_allclose(m, expected)
# filter two positions (aln1 & aln2, alt null_value)
merged_aln = make_aligned_seqs(
data={"1": "-CEEDE", "2": "ACDEDE", "3": "ACDEDE", "4": "ACDED-"},
moltype=PROTEIN,
array_align=True,
)
m = array([[1.0, 10.0, 4.0, 3.0], [9.0, 18.0, 5.0, 6.0]])
expected = array([[999.0, 10.0, 4.0, 3.0], [999.0] * 4])
filter_exclude_positions(
merged_aln, m, intermolecular_data_only=True, null_value=999.0
)
assert_allclose(m, expected)
def test_filter_threshold_based_multiple_interdependency_intermolecular(self):
"multiple interdependency filter functions with intermolecular data"
## cmp_function = ge
# lower boundary
null = DEFAULT_NULL_VALUE
m = array(
[
[0.63, 0.00, null],
[0.75, 0.10, 0.45],
[0.95, 0.32, 0.33],
[1.00, 0.95, 0.11],
]
)
expected = array(
[
[null, null, null],
[null, null, 0.45],
[null, null, null],
[null, null, null],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 0, greater_equal, True
)
assert_allclose(actual, expected)
# realistic test case
m = array(
[
[0.63, 0.00, null],
[0.75, 0.10, 0.45],
[0.95, 0.32, 0.33],
[1.00, 0.95, 0.11],
]
)
expected = array(
[
[null, 0.00, null],
[null, 0.10, 0.45],
[null, 0.32, 0.33],
[null, null, null],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 1, greater_equal, True
)
assert_allclose(actual, expected)
# upper boundary, nothing filtered
null = DEFAULT_NULL_VALUE
m = array(
[
[0.63, 0.00, null],
[0.75, 0.10, 0.45],
[0.95, 0.32, 0.33],
[1.00, 0.95, 0.11],
]
)
expected = m
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 5, greater_equal, True
)
assert_allclose(actual, expected)
# cmp_function = less_equal, realistic test case
m = array(
[
[0.63, 0.00, null],
[0.75, 0.10, 0.45],
[0.95, 0.32, 0.33],
[1.00, 0.95, 0.11],
]
)
expected = array(
[
[0.63, null, null],
[0.75, null, null],
[null, null, null],
[1.00, null, null],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.35, 1, less_equal, True
)
assert_allclose(actual, expected)
def test_filter_threshold_based_multiple_interdependency_intramolecular(self):
"multiple interdependency filter functions with intramolecular data"
null = DEFAULT_NULL_VALUE
## cmp_function = ge
# lower bound, everything filtered
m = array(
[
[0.63, 0.75, 0.95, 1.00],
[0.75, 0.10, null, 0.95],
[0.95, null, 0.33, 0.11],
[1.00, 0.95, 0.11, 1.00],
]
)
expected = array(
[
[null, null, null, null],
[null, null, null, null],
[null, null, null, null],
[null, null, null, null],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 0, greater_equal
)
assert_allclose(actual, expected)
# realistic test case
m = array(
[
[0.63, 0.75, 0.95, 1.00],
[0.75, 0.10, null, 0.95],
[0.95, null, 0.33, 0.11],
[1.00, 0.95, 0.11, 1.00],
]
)
expected = array(
[
[null, null, null, null],
[null, 0.10, null, null],
[null, null, 0.33, null],
[null, null, null, null],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 1, greater_equal
)
assert_allclose(actual, expected)
# upper boundary, nothing filtered
m = array(
[
[0.63, 0.75, 0.95, 1.00],
[0.75, 0.10, null, 0.95],
[0.95, null, 0.33, 0.11],
[1.00, 0.95, 0.11, 1.00],
]
)
expected = m
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.95, 5, greater_equal
)
assert_allclose(actual, expected)
## cmp_function = le
# realistic test case
m = array(
[
[0.63, 0.75, 0.95, 1.00],
[0.75, 0.10,
to ensure that the initial
# incrementation of this index by the _enqueue_hint_child() directly called
# below initializes index 0 of the "hints_meta" fixed list.
hints_meta_index_last = -1
# ..................{ FUNC ~ code }..................
# Python code snippet type-checking the current pith against the currently
# visited hint (to be appended to the "func_wrapper_code" string).
func_curr_code: str = None # type: ignore[assignment]
# ..................{ FUNC ~ code : locals }..................
# Local scope (i.e., dictionary mapping from the name to value of each
# attribute referenced in the signature) of this wrapper function required
# by this Python code snippet.
func_wrapper_locals: CallableScope = {}
# True only if one or more PEP-compliant type hints visitable from this
# root hint require a pseudo-random integer. If true, the higher-level
# beartype._decor._code.codemain.generate_code() function prefixes the body
# of this wrapper function with code generating such an integer.
is_var_random_int_needed = False
# ..................{ CLOSURES }..................
# Closures centralizing frequently repeated logic and thus addressing any
# Don't Repeat Yourself (DRY) concerns during the breadth-first search
# (BFS) performed below.
def _enqueue_hint_child(pith_child_expr: str) -> str:
'''
**Enqueue** (i.e., append) a new tuple of metadata describing the
currently iterated child hint to the end of the ``hints_meta`` queue,
enabling this hint to be visited by the ongoing breadth-first search
(BFS) traversing over this queue.
Parameters
----------
pith_child_expr : str
Python code snippet evaluating to the child pith to be
type-checked against the currently iterated child hint.
This closure also implicitly expects the following local variables of
the outer scope to be set to relevant values:
hint_child : object
Currently iterated PEP-compliant child hint subscripting the
currently visited hint.
Returns
----------
str
Placeholder string to be subsequently replaced by code
type-checking this child pith against this child hint.
'''
# Allow these local variables of the outer scope to be modified below.
nonlocal hint_child_placeholder_id, hints_meta_index_last
# Increment the 0-based index of metadata describing the last visitable
# hint in the "hints_meta" list *BEFORE* overwriting the existing
# metadata at this index.
#
# Note this index is guaranteed to *NOT* exceed the fixed length of
# this list, by prior validation.
hints_meta_index_last += 1
# Increment the unique identifier of the currently iterated child hint.
hint_child_placeholder_id += 1
# Placeholder string to be globally replaced by code type-checking the
# child pith against this child hint, intentionally prefixed and
# suffixed by characters that:
#
# * Are intentionally invalid as Python code, guaranteeing that the
# top-level call to the exec() builtin performed by the @beartype
# decorator will raise a "SyntaxError" exception if the caller fails
# to replace all placeholder substrings generated by this method.
# * Protect the identifier embedded in this substring against ambiguous
# global replacements of larger identifiers containing this
# identifier. If this identifier were *NOT* protected in this manner,
# then the first substring "0" generated by this method would
# ambiguously overlap with the subsequent substring "10" generated by
# this method, which would then produce catastrophically erroneous
# and non-trivial to debug Python code.
hint_child_placeholder = (
f'{PEP_CODE_HINT_CHILD_PLACEHOLDER_PREFIX}'
f'{str(hint_child_placeholder_id)}'
f'{PEP_CODE_HINT_CHILD_PLACEHOLDER_SUFFIX}'
)
# Create and insert a new tuple of metadata describing this child hint
# at this index of this list.
#
# Note that this assignment is guaranteed to be safe, as "SIZE_BIG" is
# guaranteed to be substantially larger than "hints_meta_index_last".
hints_meta[hints_meta_index_last] = (
hint_child,
hint_child_placeholder,
pith_child_expr,
indent_child,
)
# Return this placeholder string.
return hint_child_placeholder
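# Illustrative note (symbolic, not part of the original code): every
# placeholder returned here has the shape
#     f'{PEP_CODE_HINT_CHILD_PLACEHOLDER_PREFIX}{hint_child_placeholder_id}{PEP_CODE_HINT_CHILD_PLACEHOLDER_SUFFIX}'
# and "func_wrapper_code" is later globally str.replace()'d at exactly that
# substring by the code snippet type-checking the corresponding child pith.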
# ..................{ CLOSURES ~ locals }..................
# Local variables calling one or more closures declared above and thus
# deferred until after declaring those closures.
# Placeholder string to be globally replaced in the Python code snippet to
# be returned (i.e., "func_wrapper_code") by a Python code snippet
# type-checking the child pith expression (i.e., "pith_child_expr") against
# the currently iterated child hint (i.e., "hint_child"), initialized to a
# placeholder describing the root hint.
hint_child_placeholder = _enqueue_hint_child(pith_root_expr)
# Python code snippet type-checking the root pith against the root hint,
# localized separately from the "func_wrapper_code" snippet to enable this
# function to validate this code to be valid *BEFORE* returning this code.
func_root_code = (
f'{_PEP_CODE_CHECK_HINT_ROOT_PREFIX}{hint_child_placeholder}')
# Python code snippet to be returned, seeded with a placeholder to be
# replaced on the first iteration of the breadth-first search performed
# below with a snippet type-checking the root pith against the root hint.
func_wrapper_code = func_root_code
# ..................{ SEARCH }..................
# While the 0-based index of metadata describing the next visited hint in
# the "hints_meta" list does *NOT* exceed that describing the last
# visitable hint in this list, there remains at least one hint to be
# visited in the breadth-first search performed by this iteration.
while hints_meta_index_curr <= hints_meta_index_last:
# Metadata describing the currently visited hint.
hint_curr_meta = hints_meta[hints_meta_index_curr]
# Assert this metadata is a tuple as expected. This enables us to
# distinguish between proper access of used items and improper access
# of unused items of the parent fixed list containing this tuple, since
# an unused item of this list is initialized to "None" by default.
assert hint_curr_meta.__class__ is tuple, (
f'Current hint metadata {repr(hint_curr_meta)} at '
f'index {hints_meta_index_curr} not tuple.')
# Localize metadatum for both efficiency and f-string purposes.
hint_curr = hint_curr_meta[_HINT_META_INDEX_HINT]
hint_curr_placeholder = hint_curr_meta[_HINT_META_INDEX_PLACEHOLDER]
pith_curr_expr = hint_curr_meta[_HINT_META_INDEX_PITH_EXPR]
indent_curr = hint_curr_meta[_HINT_META_INDEX_INDENT]
#FIXME: This test can be trivially avoided by:
#* Initializing "hint_curr_label = HINT_ROOT_LABEL" above.
#* Unconditionally setting "hint_curr_label = HINT_CHILD_LABEL"
# below at the end of each iteration of this loop.
#
#Since we're going to be fundamentally refactoring this entire
#algorithm into a two-phase algorithm, let's hold off on that until the
#radioactive dust settles, shall we?
# Human-readable label prefixing the machine-readable representation of
# the currently visited type hint in exception and warning messages.
#
# Note that this label intentionally only describes the root and
# currently iterated child hints rather than the root hint, the
# currently iterated child hint, and all interim child hints leading
# from the former to the latter. The latter approach would be
# non-human-readable and insane.
hint_curr_label = (
HINT_ROOT_LABEL
if hints_meta_index_curr == 0 else
HINT_CHILD_LABEL
)
# ................{ REDUCTION }................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAVEATS: Synchronize changes here with the corresponding block of the
# beartype._decor._error._errorsleuth.CauseSleuth.__init__()
# method.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Reduce the currently visited hint to a lower-level hint-like object
# associated with this hint if this hint satisfies a condition.
#
# This decision is intentionally implemented as a linear series of
# tests ordered in descending likelihood for efficiency. While
# alternative implementations (that are more readily readable and
# maintainable) do exist, these alternatives all appear to be
# substantially less efficient.
#
# ................{ REDUCTION ~ pep 484 ~ none }................
# If this is the PEP 484-compliant "None" singleton, reduce this hint
# to the type of that singleton. While not explicitly defined by the
# "typing" module, PEP 484 explicitly supports this singleton:
# When used in a type hint, the expression None is considered
# equivalent to type(None).
# The "None" singleton is used to type callables lacking an explicit
# "return" statement and thus absurdly common. Ergo, detect this first.
if hint_curr is None:
hint_curr = NoneType
# ................{ REDUCTION ~ pep 593 }................
# If this is a PEP 593-compliant type metahint...
#
# Metahints form the core backbone of our beartype-specific data
# validation API and are thus also extremely common. Ergo, detect these
# next-to-first.
elif is_hint_pep593(hint_curr):
# If this metahint is beartype-specific (i.e., its second argument
# is an instance of the "beartype._vale._valesub._SubscriptedIs"
# class produced by subscripting the "Is" class), ignore all
# annotations on this hint by reducing this hint to its origin
# (e.g.
# %%
from functools import partial
import logging
import numpy as np
import torch
import colorsys
from torchvtk.utils import make_5d, tex_from_pts
# Persistent Homology peak extraction
class Peak:
def __init__(self, startidx):
self.born = self.left = self.right = startidx
self.died = None
def get_persistence(self, seq):
return seq[self.born] if self.died is None else seq[self.born] - seq[self.died]
def get_persistent_homology(seq):
peaks = []
# Maps indices to peaks
idxtopeak = [None for s in seq]
# Sequence indices sorted by values
indices = range(len(seq))
indices = sorted(indices, key = lambda i: seq[i], reverse=True)
# Process each sample in descending order
for idx in indices:
lftdone = (idx > 0 and idxtopeak[idx-1] is not None)
rgtdone = (idx < len(seq)-1 and idxtopeak[idx+1] is not None)
il = idxtopeak[idx-1] if lftdone else None
ir = idxtopeak[idx+1] if rgtdone else None
# New peak born
if not lftdone and not rgtdone:
peaks.append(Peak(idx))
idxtopeak[idx] = len(peaks)-1
# Directly merge to next peak left
if lftdone and not rgtdone:
peaks[il].right += 1
idxtopeak[idx] = il
# Directly merge to next peak right
if not lftdone and rgtdone:
peaks[ir].left -= 1
idxtopeak[idx] = ir
# Merge left and right peaks
if lftdone and rgtdone:
# Left was born earlier: merge right to left
if seq[peaks[il].born] > seq[peaks[ir].born]:
peaks[ir].died = idx
peaks[il].right = peaks[ir].right
idxtopeak[peaks[il].right] = idxtopeak[idx] = il
else:
peaks[il].died = idx
peaks[ir].left = peaks[il].left
idxtopeak[peaks[ir].left] = idxtopeak[idx] = ir
# This is optional convenience
return sorted(peaks, key=lambda p: p.get_persistence(seq), reverse=True)
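# Illustrative usage sketch (not part of the original module): extract peaks
# from a toy 1-D sequence and report where each peak was born and how
# persistent it is. The sequence values below are arbitrary.
def _example_persistent_homology():
    seq = [1.0, 5.0, 2.0, 8.0, 3.0, 7.0, 1.0]
    peaks = get_persistent_homology(seq)
    # Peaks are returned sorted by persistence, most persistent first.
    return [(p.born, p.get_persistence(seq)) for p in peaks]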
def distinguishable_color_generator():
''' Generates distinguishable colors, compare
http://alumni.media.mit.edu/~wad/color/numbers.html
'''
colors = np.array([
[173, 35, 35],
[42, 75, 215],
[29, 105, 20],
[129, 74, 25],
[129, 38, 192],
[160, 160, 160],
[129, 197, 122],
[157, 175, 255],
[41, 208, 208],
[255, 146, 51],
[255, 238, 51],
[255, 205, 243],
[255, 255, 255]
], dtype=np.float32) / 255.0
np.random.shuffle(colors)
for color in colors:
yield color
def random_color_generator():
''' Generates random colors '''
while True:
h, s, l = np.random.rand(), 0.2 + np.random.rand() * 0.8, 0.35 + np.random.rand() * 0.3
yield np.array([float(255*i) for i in colorsys.hls_to_rgb(h,l,s)], dtype=np.float32) / 255.0
def fixed_color_generator(color=(180, 170, 170.0)):
while True: yield np.array(color).astype(np.float32) / 255.0
def get_histogram_peaks(data, bins=1024, skip_outlier=True):
vals, ranges = np.histogram(data, bins)
peaks = get_persistent_homology(vals)
ret = np.array(list(map(lambda p: (
(ranges[p.born] + ranges[p.born+1])/2.0, # intensity value
p.get_persistence(vals)), peaks # persistence for peak importance
)))
return np.stack([ret[:, 0], ret[:, 1] / peaks[0].get_persistence(vals)], axis=1)
def overlaps_trapeze(trap, ts):
for t in ts:
if trap[0,0] < t[5,0] and trap[5,0] > t[0,0]: return True
return False
def includes_maxvalue(trap, vol=None):
return trap[5, 0] >= (1.0 if vol is None else vol.max())
def includes_minvalue(trap, vol=None, eps=1e-2):
return trap[0, 0] <= (eps if vol is None else vol.min() + eps)
def flatten_clip_sort_peaks(peaks):
if len(peaks) == 0:
peaks = np.zeros((1,5))
arr = np.clip(np.stack(peaks).reshape((-1, 5)), 0, 1)
idx = np.argsort(arr[:, 0])
return arr[idx]
def colorize_trapeze(t, color):
res = np.zeros((t.shape[0], 5))
res[:, 0] = t[:, 0]
res[:, 1:4] = color
res[:, 4] = t[:, 1]
return res
def make_trapezoid(c, top_height, bot_width, fixed_shape=False):
# bot_width = bot_width * c + 1e-2 # allow for wider peaks in higher density
# int_contrib = np.clip(c * (1/0.6), 0, 1) # higher opacity on higher density (usually bones, which are often occluded)
# top_height = (int_contrib + top_height) / 2.0 # allow for mostly low peaks on skin, higher peaks on bones
if fixed_shape:
bot_height = top_height
top_width = bot_width
else:
bot_height = np.random.rand(1).item() * top_height
top_width = np.random.rand(1).item() * bot_width
return np.stack([
np.array([c - bot_width/2 -2e-2, 0]), # left wall ____________ __ top_height
np.array([c - bot_width/2, bot_height]), # bottom left / top_width \
np.array([c - top_width/2, top_height]), # top left /__ bot_width __\__ bot_height
np.array([c + top_width/2, top_height]), # top right | |
np.array([c + bot_width/2, bot_height]), # bottom right | right wall ->|
np.array([c + bot_width/2 +2e-2, 0]) # right wall |<- left wall |
])                                               #                 |       c       |__ 0
def get_tf_pts_from_peaks(peaks, colors='random', height_range=(0.1, 0.9), width_range=(0.02, 0.2), peak_center_noise_std=0.05, max_num_peaks=5, peak_valid_fn=None, fixed_shape=False):
''' Compute transfer function with non-overlapping trapezoids around given peaks
Args:
peaks (np.array of [intensity, persistence]): The histogram peaks
colors (str): Either "distinguishable", "random" or "fixed"
height_range (tuple of floats): Range in which to draw trapezoid height (=opacity). Max range is (0, 1)
width_range (tuple of floats): Range in which to draw trapezoid width around peak. Max range is (0, 1)
peak_center_noise_std (float): Standard deviation of the Gaussian noise applied to peak centers, to shift those randomly.
max_num_peaks (int): Maximum number of peaks in the histogram. The number will be drawn as U(1, max_num_peaks)
peak_valid_fn (func): Function that gets the old TF without a new peak and the TF with the new peak and decides whether to add the peak (return True) or not (return False).
fixed_shape (bool): If True produces a classic ramp peak, if False it has random double ramps
Returns:
[ np.array [x, y] ]: List of TF primitives (List of coordinates [0,1]²) to be lerped
'''
if peak_valid_fn is None: peak_valid_fn = lambda a, b: True
if max_num_peaks is None:
n_peaks = len(peaks)
elif isinstance(max_num_peaks, (tuple, list)) and len(max_num_peaks) == 2:
n_peaks = np.random.randint(max_num_peaks[0], max_num_peaks[1] + 1)
else:
n_peaks = np.random.randint(1, max_num_peaks+1)
height_range_len = height_range[1] - height_range[0]
width_range_len = width_range[1] - width_range[0]
if colors == 'distinguishable': color_gen = distinguishable_color_generator()
elif colors == 'random': color_gen = random_color_generator()
elif colors == 'fixed': color_gen = fixed_color_generator()
else: raise Exception(f'Invalid colors argument ({colors}). Use either "distinguishable", "random" or "fixed".')
if peaks is None:
peaks = np.random.rand(100, 2)
peaks = np.stack([np.linspace(0.05, 0.75, 15)]*2, axis=1)
trapezes = [make_trapezoid(c + np.random.randn() * peak_center_noise_std, # Center of peak
top_height= height_range_len * np.random.rand(1).item() + height_range[0],
bot_width = width_range_len * np.random.rand(1).item() + width_range[0],
fixed_shape=fixed_shape
) for c, p in peaks]
result = []
np.random.shuffle(trapezes)
fail_count = 0
for t in trapezes:
if overlaps_trapeze(t, result) or includes_maxvalue(t) or includes_minvalue(t):
continue
else:
trap = colorize_trapeze(t, next(color_gen))
if peak_valid_fn(
tf_pts_border(flatten_clip_sort_peaks(result)),
tf_pts_border(flatten_clip_sort_peaks(result + [trap]))):
fail_count = 0 # reset fail count if peak gets added
result.append(trap)
else: fail_count += 1 # failed: the new TF produces an image that is too similar to the previous one
if len(result) >= n_peaks or fail_count > 5: break # max 5 render tries
return flatten_clip_sort_peaks(result)
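# Illustrative usage sketch (not part of the original module): build a random
# point-based transfer function for a synthetic volume. The volume shape and
# parameter values are assumptions for demonstration; the tf_pts_border()
# helper used internally by get_tf_pts_from_peaks() is assumed to be available
# as in the rest of this file.
def _example_random_tf():
    vol = np.random.rand(32, 32, 32).astype(np.float32)
    peaks = get_histogram_peaks(vol, bins=256)
    tf_pts = get_tf_pts_from_peaks(peaks, colors='random', max_num_peaks=3)
    return tf_pts  # (N, 5) array of (intensity, r, g, b, opacity) control points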
def create_peaky_tf(peaks, widths, default_color=(0.7, 0.66, 0.66), default_height=0.99, warn_overlap=True):
''' Creates a peaky tf with given peak centers, widths and optional rgb, o
Beware: The output of this function is undefined for overlapping trapezes! A warning will be printed.
Args:
peaks (array): Array of shape (N) only peak centers / (N, 2) centers and opacity / (N, 4) centers and rgb / (N, 5) centers, opacity and rgb.
widths (array): Array of shape (N), same length as peaks.
default_color (array, optional): RGB value as array. Defaults to (0.7, 0.66, 0.66).
default_height (float, optional): Default opacity of none is given in peaks. Defaults to 0.99.
warn_overlap (bool, optional): Prints a warning if the resulting Transfer Function has overlapping trapezes. Defaults to True.
Returns:
Array: Point-based Transfer Function (N, 5) with the given peaks
'''
trapezes = []
for p, w in zip(peaks, widths):
if not hasattr(p, '__len__'): c, o, rgb = p, default_height, default_color
elif len(p) == 2: c, o, rgb = p[0], p[1], default_color
elif len(p) == 4: c, o, rgb = p[0], default_height, p[1:]
elif len(p) == 5: c, o, rgb = p[0], p[1], p[2:]
else: raise Exception(f'Invalid input for peaks: list of {p}. See docstring of create_peaky_tf()')
if warn_overlap and overlaps_trapeze(make_trapezoid(c, o, w, fixed_shape=True), trapezes):
logging.warning(f'create_peaky_tf() has overlapping trapezes. First overlapping trapeze in the sequence: (center={c}, width={w}, index={len(trapezes)})')
trapezes.append(colorize_trapeze(make_trapezoid(c, o, w, fixed_shape=True), rgb))
return tf_pts_border(flatten_clip_sort_peaks(trapezes))
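# Illustrative usage (centers, opacities and widths are made-up values): two peaks
# at intensities 0.3 and 0.6 with opacities 0.8 and 0.5.
#
#   tf = create_peaky_tf(peaks=np.array([[0.3, 0.8], [0.6, 0.5]]),
#                        widths=np.array([0.05, 0.10]))
#   # -> point-based transfer function of shape (N, 5), see docstring above.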
def create_cos_tf(phases, amplitudes, frequencies=range):
    n = len(phases)
    if not torch.is_tensor(phases):     phases     = torch.Tensor(phases)
    if not torch.is_tensor(amplitudes): amplitudes = torch.Tensor(amplitudes)
    if callable(frequencies):  # the default `range` yields frequencies 0..n-1
        freqs = torch.Tensor(frequencies(n))
    else:
        assert len(frequencies) == n
        freqs = torch.Tensor(frequencies)
    def tf(x):
        # Broadcast x against the n cosine components; summing over the components is
        # an assumption here, since the original body neither reduced nor returned.
        x = x.unsqueeze(-1).expand(*x.shape, n)
        return (torch.cos(freqs * (x + phases)) * amplitudes).sum(dim=-1)
    return tf
def tries():
n = 20
amps = torch.rand(n)
#freqs = torch.cat([torch.arange(2, 2+n//2), torch.arange(n//2, n)**1.4]).round()
freqs = torch.arange(2, n+2)**1.2
phases = torch.rand(n) * pi
x = torch.linspace(0,1,100).unsqueeze(-1).expand(-1, n)
plt.ylim((0,1))
pts = (torch.cos(pi * freqs * (x +
# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import struct
import time
from .i18n import _
from . import (
branchmap,
error,
store,
util,
)
def canperformstreamclone(pullop, bailifbundle2supported=False):
"""Whether it is possible to perform a streaming clone as part of pull.
``bailifbundle2supported`` will cause the function to return False if
bundle2 stream clones are supported. It should only be called by the
legacy stream clone code path.
Returns a tuple of (supported, requirements). ``supported`` is True if
streaming clone is supported and False otherwise. ``requirements`` is
a set of repo requirements from the remote, or ``None`` if stream clone
isn't supported.
"""
repo = pullop.repo
remote = pullop.remote
bundle2supported = False
if pullop.canusebundle2:
if 'v1' in pullop.remotebundle2caps.get('stream', []):
bundle2supported = True
# else
# Server doesn't support bundle2 stream clone or doesn't support
# the versions we support. Fall back and possibly allow legacy.
# Ensures legacy code path uses available bundle2.
if bailifbundle2supported and bundle2supported:
return False, None
# Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
#elif not bailifbundle2supported and not bundle2supported:
# return False, None
# Streaming clone only works on empty repositories.
if len(repo):
return False, None
# Streaming clone only works if all data is being requested.
if pullop.heads:
return False, None
streamrequested = pullop.streamclonerequested
# If we don't have a preference, let the server decide for us. This
# likely only comes into play in LANs.
if streamrequested is None:
# The server can advertise whether to prefer streaming clone.
streamrequested = remote.capable('stream-preferred')
if not streamrequested:
return False, None
# In order for stream clone to work, the client has to support all the
# requirements advertised by the server.
#
# The server advertises its requirements via the "stream" and "streamreqs"
# capability. "stream" (a value-less capability) is advertised if and only
# if the only requirement is "revlogv1." Else, the "streamreqs" capability
# is advertised and contains a comma-delimited list of requirements.
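    # For example, a server might advertise either of (illustrative capability strings;
    # the exact set depends on the server's repository format):
    #
    #   stream                               -> requirements == {'revlogv1'}
    #   streamreqs=revlogv1,generaldelta     -> requirements == {'revlogv1', 'generaldelta'}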
requirements = set()
if remote.capable('stream'):
requirements.add('revlogv1')
else:
streamreqs = remote.capable('streamreqs')
# This is weird and shouldn't happen with modern servers.
if not streamreqs:
return False, None
streamreqs = set(streamreqs.split(','))
# Server requires something we don't support. Bail.
if streamreqs - repo.supportedformats:
return False, None
requirements = streamreqs
return True, requirements
def maybeperformlegacystreamclone(pullop):
"""Possibly perform a legacy stream clone operation.
Legacy stream clones are performed as part of pull but before all other
operations.
A legacy stream clone will not be performed if a bundle2 stream clone is
supported.
"""
supported, requirements = canperformstreamclone(pullop)
if not supported:
return
repo = pullop.repo
remote = pullop.remote
# Save remote branchmap. We will use it later to speed up branchcache
# creation.
rbranchmap = None
if remote.capable('branchmap'):
rbranchmap = remote.branchmap()
repo.ui.status(_('streaming all changes\n'))
fp = remote.stream_out()
l = fp.readline()
try:
resp = int(l)
except ValueError:
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if resp == 1:
raise error.Abort(_('operation forbidden by server'))
elif resp == 2:
raise error.Abort(_('locking the remote repository failed'))
elif resp != 0:
raise error.Abort(_('the server sent an unknown error code'))
l = fp.readline()
try:
filecount, bytecount = map(int, l.split(' ', 1))
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
with repo.lock():
consumev1(repo, fp, filecount, bytecount)
# new requirements = old non-format requirements +
# new format-related remote requirements
# requirements from the streamed-in repository
repo.requirements = requirements | (
repo.requirements - repo.supportedformats)
repo._applyopenerreqs()
repo._writerequirements()
if rbranchmap:
branchmap.replacecache(repo, rbranchmap)
repo.invalidate()
def allowservergeneration(ui):
"""Whether streaming clones are allowed from the server."""
return ui.configbool('server', 'uncompressed', True, untrusted=True)
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
return repo.store.walk()
def generatev1(repo):
"""Emit content for version 1 of a streaming clone.
This returns a 3-tuple of (file count, byte size, data iterator).
The data iterator consists of N entries for each file being transferred.
Each file entry starts as a line with the file name and integer size
delimited by a null byte.
The raw file data follows. Following the raw file data is the next file
entry, or EOF.
When used on the wire protocol, an additional line indicating protocol
success will be prepended to the stream. This function is not responsible
for adding it.
This function will obtain a repository lock to ensure a consistent view of
the store is captured. It therefore may raise LockError.
"""
entries = []
total_bytes = 0
# Get consistent snapshot of repo, lock during scan.
with repo.lock():
repo.ui.debug('scanning\n')
for name, ename, size in _walkstreamfiles(repo):
if size:
entries.append((name, size))
total_bytes += size
repo.ui.debug('%d files, %d bytes to transfer\n' %
(len(entries), total_bytes))
svfs = repo.svfs
oldaudit = svfs.mustaudit
debugflag = repo.ui.debugflag
svfs.mustaudit = False
def emitrevlogdata():
try:
for name, size in entries:
if debugflag:
repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
# partially encode name over the wire for backwards compat
yield '%s\0%d\n' % (store.encodedir(name), size)
if size <= 65536:
with svfs(name, 'rb') as fp:
yield fp.read(size)
else:
for chunk in util.filechunkiter(svfs(name), limit=size):
yield chunk
finally:
svfs.mustaudit = oldaudit
return len(entries), total_bytes, emitrevlogdata()
def generatev1wireproto(repo):
"""Emit content for version 1 of streaming clone suitable for the wire.
This is the data output from ``generatev1()`` with a header line
indicating file count and byte size.
"""
filecount, bytecount, it = generatev1(repo)
yield '%d %d\n' % (filecount, bytecount)
for chunk in it:
yield chunk
def generatebundlev1(repo, compression='UN'):
"""Emit content for version 1 of a stream clone bundle.
The first 4 bytes of the output ("HGS1") denote this as stream clone
bundle version 1.
The next 2 bytes indicate the compression type. Only "UN" is currently
supported.
The next 16 bytes are two 64-bit big endian unsigned integers indicating
file count and byte count, respectively.
The next 2 bytes is a 16-bit big endian unsigned short declaring the length
of the requirements string, including a trailing \0. The following N bytes
are the requirements string, which is ASCII containing a comma-delimited
list of repo requirements that are needed to support the data.
The remaining content is the output of ``generatev1()`` (which may be
compressed in the future).
Returns a tuple of (requirements, data generator).
"""
if compression != 'UN':
raise ValueError('we do not support the compression argument yet')
requirements = repo.requirements & repo.supportedformats
requires = ','.join(sorted(requirements))
def gen():
yield 'HGS1'
yield compression
filecount, bytecount, it = generatev1(repo)
repo.ui.status(_('writing %d bytes for %d files\n') %
(bytecount, filecount))
yield struct.pack('>QQ', filecount, bytecount)
yield struct.pack('>H', len(requires) + 1)
yield requires + '\0'
# This is where we'll add compression in the future.
assert compression == 'UN'
seen = 0
repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))
for chunk in it:
seen += len(chunk)
repo.ui.progress(_('bundle'), seen, total=bytecount,
unit=_('bytes'))
yield chunk
repo.ui.progress(_('bundle'), None)
return requirements, gen()
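# Illustrative sketch (not part of the original module): decoding the header
# produced by ``generatebundlev1()``. It only mirrors the layout documented in
# the docstring above and assumes the py2-era str semantics used by this module;
# Mercurial's own bundle-reading code is authoritative.
def _examplereadbundleheader(fh):
    assert fh.read(4) == 'HGS1'                          # magic
    compression = fh.read(2)                             # e.g. 'UN'
    filecount, bytecount = struct.unpack('>QQ', fh.read(16))
    reqlen = struct.unpack('>H', fh.read(2))[0]          # includes trailing \0
    requirements = fh.read(reqlen).rstrip('\0').split(',')
    return compression, filecount, bytecount, requirements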
def consumev1(repo, fp, filecount, bytecount):
"""Apply the contents from version 1 of a streaming clone file handle.
This takes the output from "streamout" and applies it to the specified
repository.
Like "streamout," the status line added by the wire protocol is not handled
by this function.
"""
with repo.lock():
repo.ui.status(_('%d files to transfer, %s of data\n') %
(filecount, util.bytecount(bytecount)))
handled_bytes = 0
repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
start = time.time()
# TODO: get rid of (potential) inconsistency
#
# If transaction is started and any @filecache property is
# changed at this point, it causes inconsistency between
# in-memory cached property and streamclone-ed file on the
# disk. Nested transaction prevents transaction scope "clone"
# below from writing in-memory changes out at the end of it,
# even though in-memory changes are discarded at the end of it
# regardless of transaction nesting.
#
# But transaction nesting can't be simply prohibited, because
# nesting occurs also in ordinary case (e.g. enabling
# clonebundles).
with
]
for slackrtm in _slackrtms:
segments.append(hangups.ChatMessageSegment('%s' % slackrtm.name))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
bot.send_message_segments(event.conv, segments)
def slack_channels(bot, event, *args):
"""list all slack channels available in specified slack team
usage: /bot slack_channels <teamname>"""
if len(args) != 1:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name to list channels of', is_bold=True)])
return
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
segments = []
segments.append(hangups.ChatMessageSegment('Slack channels in team %s:' % (slackname), is_bold=True))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
slackrtm.update_channelinfos()
for cid in slackrtm.channelinfos:
if not slackrtm.channelinfos[cid]['is_archived']:
segments.append(hangups.ChatMessageSegment('%s (%s)' % (slackrtm.channelinfos[cid]['name'], cid)))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
segments.append(hangups.ChatMessageSegment('private groups:', is_bold=True))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
slackrtm.update_groupinfos()
for gid in slackrtm.groupinfos:
if not slackrtm.groupinfos[gid]['is_archived']:
segments.append(hangups.ChatMessageSegment('%s (%s)' % (slackrtm.groupinfos[gid]['name'], gid)))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
bot.send_message_segments(event.conv, segments)
def slack_users(bot, event, *args):
"""list all slack channels available in specified slack team
usage: /bot slack_users <team> <channel>"""
if len(args) >= 3:
honame = ' '.join(args[2:])
else:
if len(args) != 2:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name and a channel', is_bold=True)])
return
honame = bot.conversations.get_name(event.conv)
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
slackrtm.update_channelinfos()
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
segments = []
segments.append(hangups.ChatMessageSegment('Slack users in channel %s:' % (channelname), is_bold=True))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
users = slackrtm.get_channel_users(channelid)
for username, realname in sorted(users.items()):
segments.append(hangups.ChatMessageSegment('%s (%s)' % (realname, username)))
segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK))
bot.send_message_segments(event.conv, segments)
def slack_listsyncs(bot, event, *args):
"""list current conversations we are syncing
usage: /bot slack_listsyncs"""
segments = [
hangups.ChatMessageSegment('list of currently synced conversations:', is_bold=True),
hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK)
]
for slackrtm in _slackrtms:
for sync in slackrtm.syncs:
hangoutname = 'unknown'
for c in bot.list_conversations():
if c.id_ == sync.hangoutid:
hangoutname = bot.conversations.get_name(c, truncate=False)
break
segments.extend(
[
hangups.ChatMessageSegment(
'%s:%s(%s) : %s(%s)' % (
slackrtm.name,
slackrtm.get_channelname(sync.channelid),
sync.channelid,
hangoutname,
sync.hangoutid
),
is_bold=True
),
hangups.ChatMessageSegment(' '),
hangups.ChatMessageSegment(sync.getPrintableOptions(), is_italic=True),
hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK),
]
)
bot.send_message_segments(event.conv, segments)
def slack_syncto(bot, event, *args):
"""start syncing the current hangout to a given slack team/channel
usage: /bot slack_syncto <teamname> <channelid>"""
if len(args) >= 3:
honame = ' '.join(args[2:])
else:
if len(args) != 2:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name and a channel', is_bold=True)])
return
honame = bot.conversations.get_name(event.conv)
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
try:
slackrtm.syncto(channelid, event.conv.id_, honame)
except AlreadySyncingError:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('Already syncing this Hangout to %s:%s.' % (slackname, channelname), is_bold=True)])
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('OK, I will now sync all messages in this Hangout to %s:%s.' % (slackname, channelname), is_bold=True)])
def slack_disconnect(bot, event, *args):
"""stop syncing the current hangout with given slack team and channel
usage: /bot slack_disconnect <teamname> <channelid>"""
if len(args) != 2:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name and a channel', is_bold=True)])
return
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
try:
slackrtm.disconnect(channelid, event.conv.id_)
except NotSyncingError:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('This Hangout is NOT synced to %s:%s.' % (slackname, channelname), is_bold=True)])
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('OK, I will no longer sync messages in this Hangout to %s:%s.' % (slackname, channelname), is_bold=True)])
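# The team lookup above is repeated verbatim in every command handler. A hypothetical
# helper (not part of the original plugin) could factor it out, e.g.:
#
#   def _get_slackrtm(bot, event, slackname):
#       for s in _slackrtms:
#           if s.name == slackname:
#               return s
#       bot.send_message_segments(event.conv, [hangups.ChatMessageSegment(
#           'ERROR: Could not find a configured slack team with name "%s", '
#           'use /bot slacks to list all teams' % slackname, is_bold=True)])
#       return None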
def slack_setsyncjoinmsgs(bot, event, *args):
"""enable or disable sending notifications any time someone joins/leaves/adds/invites/kicks
usage: /bot slack_setsyncjoinmsgs <teamname> <channelid> {true|false}"""
if len(args) != 3:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name, a channel and "true" or "false"', is_bold=True)])
return
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
enable = args[2]
if enable.lower() in ['true', 'on', 'y', 'yes']:
enable = True
elif enable.lower() in ['false', 'off', 'n', 'no']:
enable = False
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('sorry, but "%s" is not "true" or "false"' % enable, is_bold=True)])
return
try:
slackrtm.setsyncjoinmsgs(channelid, event.conv.id_, enable)
except NotSyncingError:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('This Hangout is NOT synced to %s:%s.' % (slackname, channelname), is_bold=True)])
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('OK, I will %s sync join/leave messages in this Hangout with %s:%s.' % (('now' if enable else 'no longer'), slackname, channelname), is_bold=True)])
def slack_setimageupload(bot, event, *args):
"""enable/disable image upload between the synced conversations (default: enabled)
usage: /bot slack_setimageupload <teamname> <channelid> {true|false}"""
if len(args) != 3:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name, a channel and "true" or "false"', is_bold=True)])
return
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
upload = args[2]
if upload.lower() in ['true', 'on', 'y', 'yes']:
upload = True
elif upload.lower() in ['false', 'off', 'n', 'no']:
upload = False
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('sorry, but "%s" is not "true" or "false"' % upload, is_bold=True)])
return
try:
slackrtm.setimageupload(channelid, event.conv.id_, upload)
except NotSyncingError:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('This Hangout is NOT synced to %s:%s.' % (slackname, channelname), is_bold=True)])
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('OK, I will %s upload images to this Hangout when shared in %s:%s.' % (('now' if upload else 'no longer'), slackname, channelname), is_bold=True)])
def slack_sethotag(bot, event, *args):
"""sets the identity of current hangout when syncing this conversation
(default: title of this hangout when sync was set up, use 'none' to disable tagging)
usage: /bot slack_hotag <teamname> <channelid> {<tag>|none}"""
if len(args) < 3:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: You must specify a slack team name, a channel and a tag', is_bold=True)])
return
slackname = args[0]
slackrtm = None
for s in _slackrtms:
if s.name == slackname:
slackrtm = s
break
if not slackrtm:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a configured slack team with name "%s", use /bot slacks to list all teams' % slackname, is_bold=True)])
return
channelid = args[1]
channelname = slackrtm.get_groupname(channelid, slackrtm.get_channelname(channelid))
if not channelname:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('ERROR: Could not find a channel with id "%s" in team "%s", use /bot slack_channels %s to list all channels' % (channelid, slackname, slackname), is_bold=True)])
return
hotag = ' '.join(args[2:])
if hotag == 'none':
hotag = None
oktext = 'NOT be tagged'
else:
oktext = 'be tagged with " (%s)"' % hotag
try:
slackrtm.sethotag(channelid, event.conv.id_, hotag)
except NotSyncingError:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('This Hangout is NOT synced to %s:%s.' % (slackname, channelname), is_bold=True)])
else:
bot.send_message_segments(event.conv, [hangups.ChatMessageSegment('OK, messages from this Hangout will %s in slack
import unittest
from src.preprocess.pattern import NumberOfHoles, DefineLanguage_HoleReachabilitySolver, NtGraphBuilder
from src.model.pattern import PatSequence, BuiltInPat, Nt, Repeat, Lit, LitKind, BuiltInPatKind, RepeatMatchMode, InHole, PatternAttribute
from src.model.tlform import DefineLanguage, Module
from src.context import CompilationContext
from src.parser import parse_string
from src.util import CompilationError
def result(lang, nt):
return lang.nts[nt].nt.getattribute(PatternAttribute.NumberOfHoles)
class TestDefineLanguageHoleReachabilitySolver(unittest.TestCase):
# (n ::= number)
# (P ::= (E))
# (E ::= (E n) hole)
# n = (zero, zero) P = (one one) E = (one one)
def test_holereachability0(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
PatSequence([Nt('E', 'E_2'), Nt('n', 'n_3')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.One , NumberOfHoles.One ))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.One , NumberOfHoles.One ))
# (P ::= (E))
# (E ::= P)
# P = (zero, zero) E = (zero zero)
def test_holereachability1(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
# (n ::= number) (zero zero)
# (P ::= (E)) (one one)
# (E ::= P (E n) hole) (one one)
# The algorithm does not deal with infinite cycles that well - for example, we can have the
# term (((( (E) )))), which is infinite and thus ideally should match zero holes.
# Since the algorithm simply propagates holes throughout the graph, it does not take
# into account inner-node cycles such as (E) -> (E n) -> (E). Perhaps for each edge in such
# a cycle we should enforce a minimum value of Zero holes?
# This might be fine for our purpose of checking in-hole patterns, which involves checking
# whether a given expression matches exactly one hole.
def test_holereachability2(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_2'), Nt('n', 'n_3')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.One , NumberOfHoles.One ))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.One , NumberOfHoles.One ))
# (n ::= number) (zero zero)
# (P ::= (E)) (one many)
# (E ::= P (E n) (E E) hole) (one many)
def test_holereachability3(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_3'), Nt('E', 'E_4')]),
PatSequence([Nt('E', 'E_5'), Nt('n', 'n_6')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.One , NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.One , NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P n (E n) (E E) hole) (zero many) zero because n
def test_holereachability4(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
Nt('n', 'n_2'),
PatSequence([Nt('E', 'E_3'), Nt('E', 'E_4')]),
PatSequence([Nt('E', 'E_5'), Nt('n', 'n_6')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P (E n) (hole ...)) (zero many) hole under ellipsis
def test_holereachability5(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_5'), Nt('n', 'n_6')]),
PatSequence([Repeat(BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'))]),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (one many)
# (E ::= P (E hole)) (one many) (((...) hole) hole)
def test_holereachability6(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_5'), BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole')]),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Many, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Many, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P n (E hole)) (zero many) because n
def test_holereachability7(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
Nt('n', 'n_2'),
PatSequence([Nt('E', 'E_5'), BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole')]),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E E)) (many many)
# (E ::= P (E n) hole) (one many)
def test_holereachability8(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0'), Nt('E', 'E_1')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_5'), Nt('n', 'n_2')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.One , NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Many, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E E) hole) (one many)
# (E ::= P (E n)) (one many)
def test_holereachability9(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0'), Nt('E', 'E_1')]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Nt('E', 'E_5'), Nt('n', 'n_2')]),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.One , NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.One , NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (Z ::= P) (zero many)
# (P ::= (E)) (zero many)
# (E ::= P ((Z) ... n) hole) (zero many) because Z under ellipsis
def test_holereachability10(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('Z', 'Z'), [
Nt('P', 'P')
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([
Repeat(PatSequence([Nt('Z', 'Z'), ])),
Nt('n', 'n_2'),
]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'Z'), (NumberOfHoles.Zero, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P ((P) ... ()) hole) (zero many)
def test_holereachability11(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([
Repeat(PatSequence([Nt('P', 'P'), ])),
PatSequence([]),
]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
self.assertEqual(result(lang, 'P'), (NumberOfHoles.Zero, NumberOfHoles.Many))
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P (in-hole P n) hole) (zero many)
# Think we should disallow in-hole patterns in language grammar definition.
def test_holereachability12(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
InHole(Nt('P', 'P'), Nt('n', 'n')),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
try:
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.fail()
except CompilationError as ex:
self.assertEqual(str(ex), 'in-hole pattern in define-language')
# (n ::= number) (zero zero)
# (P ::= (E)) (zero many)
# (E ::= P ((in-hole P n) ...) hole) (zero many)
# Think we should disallow in-hole patterns in language grammar definition.
def test_holereachability13(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('P', 'P'), [
PatSequence([Nt('E', 'E_0')]),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
Nt('P', 'P_1'),
PatSequence([Repeat(InHole(Nt('P', 'P'), Nt('n', 'n'))) ]),
BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole'),
]),
])
try:
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.fail()
except CompilationError as ex:
self.assertEqual(str(ex), 'in-hole pattern in define-language')
# n ::= number (zero, zero)
# E ::= (E hole) (E E) n (zero, many)
def test_holereachability14(self):
lang = DefineLanguage('Lang', [
DefineLanguage.NtDefinition(Nt('n', 'n'), [
BuiltInPat(BuiltInPatKind.Number, 'number', 'number'),
]),
DefineLanguage.NtDefinition(Nt('E', 'E'), [
PatSequence([Nt('E', 'E'), Nt('E', 'E') ]),
PatSequence([Nt('E', 'E'), BuiltInPat(BuiltInPatKind.Hole, 'hole', 'hole') ]),
Nt('n', 'n'),
]),
])
graph = NtGraphBuilder(lang).run()
DefineLanguage_HoleReachabilitySolver(lang, graph).run()
self.assertEqual(result(lang, 'n'), (NumberOfHoles.Zero, NumberOfHoles.Zero))
self.assertEqual(result(lang, 'E'), (NumberOfHoles.Zero, NumberOfHoles.Many))
import datetime
from django.db import models
from django.db.models import Q, QuerySet
from .base import OCDBase, LinkBase, OCDIDField, RelatedBase, IdentifierBase
from .division import Division
from .jurisdiction import Jurisdiction
from ... import common
# abstract models
class ContactDetailBase(RelatedBase):
"""
A base class for ContactDetail models.
"""
type = models.CharField(
max_length=50,
choices=common.CONTACT_TYPE_CHOICES,
help_text="The type of Contact being defined.",
)
value = models.CharField(
max_length=300,
help_text="The content of the Contact information like a phone number or email address.",
)
note = models.CharField(
max_length=300,
blank=True,
help_text="A short, optional note about the Contact value.",
)
label = models.CharField(
max_length=300, blank=True, help_text="A title for the content of the Contact."
)
class Meta:
abstract = True
def __str__(self):
return "{}: {}".format(self.get_type_display(), self.value)
class OtherNameBase(RelatedBase):
"""
A base class for OtherName models.
"""
name = models.CharField(
max_length=500, db_index=True, help_text="An alternative name."
)
note = models.CharField(
max_length=500,
blank=True,
help_text="A short, optional note about alternative name.",
)
start_date = models.CharField(
max_length=10,
blank=True,
help_text="An optional start date for usage of the alternative name "
"in YYYY[-MM[-DD]] string format.",
)
end_date = models.CharField(
max_length=10,
blank=True,
help_text="An optional end date for usage of the alternative name in "
"YYYY[-MM[-DD]] string format.",
)
class Meta:
abstract = True
def __str__(self):
return "{} ({})".format(self.name, self.note)
# the actual models
class Organization(OCDBase):
"""
A group of people, typically in a legislative or rule-making context.
"""
id = OCDIDField(ocd_type="organization")
name = models.CharField(max_length=300, help_text="The name of the Organization.")
image = models.URLField(
blank=True,
max_length=2000,
help_text="A URL leading to an image that identifies the Organization visually.",
)
parent = models.ForeignKey(
"self",
related_name="children",
null=True,
# parent can be deleted w/o affecting children
on_delete=models.SET_NULL,
help_text="A link to another Organization that serves as this Organization's parent.",
)
jurisdiction = models.ForeignKey(
Jurisdiction,
related_name="organizations",
null=True,
# deletion of a jurisdiction should be hard
on_delete=models.PROTECT,
help_text="A link to the Jurisdiction that contains this Organization.",
)
classification = models.CharField(
max_length=100,
blank=True,
choices=common.ORGANIZATION_CLASSIFICATION_CHOICES,
help_text="The type of Organization being defined.",
)
founding_date = models.CharField(
max_length=10,
blank=True,
help_text="The founding date of the Organization in YYYY[-MM[-DD]] string format.",
)
dissolution_date = models.CharField(
max_length=10,
blank=True,
help_text="The dissolution date of the Organization in YYYY[-MM[-DD]] string format.",
)
def __str__(self):
return self.name
# Access all "ancestor" organizations
def get_parents(self):
org = self
while True:
org = org.parent
# Django accesses parents lazily, so have to check if one actually exists
if org:
yield org
else:
break
def get_current_members(self):
""" return all Person objects w/ current memberships to org """
today = datetime.date.today().isoformat()
return Person.objects.filter(
Q(memberships__start_date="") | Q(memberships__start_date__lte=today),
Q(memberships__end_date="") | Q(memberships__end_date__gte=today),
memberships__organization_id=self.id,
)
class Meta:
db_table = "opencivicdata_organization"
index_together = [
["jurisdiction", "classification", "name"],
["classification", "name"],
]
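# Illustrative usage (object names are made up):
#
#   committee = Organization.objects.get(name="Appropriations Committee")
#   ancestors = list(committee.get_parents())       # walk up the parent chain
#   members = committee.get_current_members()       # Person queryset with active memberships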
class OrganizationIdentifier(IdentifierBase):
"""
Upstream identifiers of an Organization.
"""
organization = models.ForeignKey(
Organization,
related_name="identifiers",
help_text="Reference to the Organization identified by this alternative identifier.",
on_delete=models.CASCADE,
)
def __str__(self):
tmpl = "%s identifies %s"
return tmpl % (self.identifier, self.organization)
class Meta:
db_table = "opencivicdata_organizationidentifier"
class OrganizationName(OtherNameBase):
"""
Alternate or former name for an Organization.
"""
organization = models.ForeignKey(
Organization,
related_name="other_names",
help_text="A link to the Organization with this alternative name.",
on_delete=models.CASCADE,
)
class Meta:
db_table = "opencivicdata_organizationname"
class OrganizationContactDetail(ContactDetailBase):
"""
Contact information for an Organization.
"""
organization = models.ForeignKey(
Organization,
related_name="contact_details",
help_text="A link to the Organization connected to this contact.",
on_delete=models.CASCADE,
)
class Meta:
db_table = "opencivicdata_organizationcontactdetail"
class OrganizationLink(LinkBase):
"""
URL for a document about an Organization.
"""
organization = models.ForeignKey(
Organization,
related_name="links",
help_text="A reference to the Organization connected to this link.",
on_delete=models.CASCADE,
)
class Meta:
db_table = "opencivicdata_organizationlink"
class OrganizationSource(LinkBase):
"""
Source used in assembling an Organization.
"""
organization = models.ForeignKey(
Organization,
related_name="sources",
help_text="A link to the Organization connected to this source.",
on_delete=models.CASCADE,
)
class Meta:
db_table = "opencivicdata_organizationsource"
class Post(OCDBase):
"""
A position in an organization that exists independently of the person holding it.
"""
id = OCDIDField(ocd_type="post")
label = models.CharField(max_length=300, help_text="A label describing the Post.")
role = models.CharField(
max_length=300,
blank=True,
help_text="The function that the holder of the post fulfills.",
)
organization = models.ForeignKey(
Organization,
related_name="posts",
help_text="The Organization in which the post is held.",
on_delete=models.CASCADE,
)
division = models.ForeignKey(
Division,
related_name="posts",
null=True,
blank=True,
default=None,
help_text="The Division where the post exists.",
# if the division goes away the post is just jurisdiction-less
on_delete=models.SET_NULL,
)
start_date = models.CharField(
max_length=10,
blank=True,
help_text="An optional start date for the Post in YYYY[-MM[-DD]] string format.",
)
end_date = models.CharField(
max_length=10,
blank=True,
help_text="An optional end date for the Post in YYYY[-MM[-DD]] string format.",
)
maximum_memberships = models.PositiveIntegerField(
default=1, help_text="The maximum number of people who can hold this Post."
)
class Meta:
db_table = "opencivicdata_post"
index_together = [["organization", "label"]]
def __str__(self):
return "{} - {} - {}".format(self.role, self.label, self.organization)
class PostContactDetail(ContactDetailBase):
"""
Contact information for whoever currently occupies a Post.
"""
post = models.ForeignKey(
Post,
related_name="contact_details",
help_text="A link to the Post connected to this contact.",
on_delete=models.CASCADE,
)
class Meta:
db_table = "opencivicdata_postcontactdetail"
class PostLink(LinkBase):
"""
URL for a document about a Post.
"""
post = models.ForeignKey(
Post,
related_name="links",
on_delete=models.CASCADE,
help_text="A reference to the Post connected to this link.",
)
class Meta:
db_table = "opencivicdata_postlink"
class PersonQuerySet(QuerySet):
def member_of(self, organization_name, current_only=True, post=None):
filter_params = []
if current_only:
today = datetime.date.today().isoformat()
filter_params = [
Q(memberships__start_date="") | Q(memberships__start_date__lte=today),
Q(memberships__end_date="") | Q(memberships__end_date__gte=today),
]
if post:
filter_params.append(Q(memberships__post__label=post))
if organization_name.startswith("ocd-organization/"):
qs = self.filter(
*filter_params, memberships__organization_id=organization_name
)
else:
qs = self.filter(
*filter_params, memberships__organization__name=organization_name
)
return qs
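# Illustrative usage (organization and post names are made up):
#
#   Person.objects.member_of("Wyoming Senate")                        # current members only
#   Person.objects.member_of("Wyoming Senate", current_only=False)    # anyone who ever served
#   Person.objects.member_of("Wyoming Senate", post="President")      # filter by post label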
class Person(OCDBase):
"""
An individual that has served in a political office.
"""
objects = PersonQuerySet.as_manager()
id = OCDIDField(ocd_type="person")
name = models.CharField(
max_length=300, db_index=True, help_text="A Person's preferred full name."
)
sort_name = models.CharField(
max_length=100,
default="",
blank=True,
help_text="A version of a Person's full name rearranged for alphabetical sorting.",
)
family_name = models.CharField(
max_length=100, blank=True, help_text="A Person's family name."
)
given_name = models.CharField(
max_length=100, blank=True, help_text="A Person's given name."
)
image = models.URLField(
blank=True,
max_length=2000,
help_text="A URL leading to an image that identifies the Person visually.",
)
gender = models.CharField(max_length=100, blank=True, help_text="A Person's gender")
summary = models.CharField(
max_length=500,
blank=True,
help_text="A short, one-line account of a Person's life.",
)
national_identity = models.CharField(
max_length=300, blank=True, help_text="The nation a Person is identified with."
)
biography = models.TextField(
blank=True, help_text="An extended account of a Person's life."
)
birth_date = models.CharField(
max_length=10,
blank=True,
help_text="The date of a Person's birth in YYYY[-MM[-DD]] string format.",
)
death_date = models.CharField(
max_length=10,
blank=True,
help_text="The date of a Person's death in YYYY[-MM[-DD]] string format.",
)
def __str__(self):
return self.name
def add_other_name(self, name, note=""):
PersonName.objects.create(name=name, note=note, person_id=self.id)
class Meta:
db_table = "opencivicdata_person"
verbose_name_plural = "people"
class PersonIdentifier(IdentifierBase):
"""
Upstream identifier for a Person.
"""
person = models.ForeignKey(
Person,
related_name="identifiers",
on_delete=models.CASCADE,
help_text="A link to the Person connected to this alternative identifier.",
)
class Meta:
db_table = "opencivicdata_personidentifier"
class PersonName(OtherNameBase):
"""
Alternate or former name of a Person.
"""
person = models.ForeignKey(
Person,
related_name="other_names",
on_delete=models.CASCADE,
help_text="A link to the Person connected to this alternative name.",
)
class Meta:
db_table = "opencivicdata_personname"
class PersonContactDetail(ContactDetailBase):
"""
Contact information for a Person.
"""
person = models.ForeignKey(
Person,
related_name="contact_details",
on_delete=models.CASCADE,
help_text="A link to the Person connected to this contact.",
)
class Meta:
db_table = "opencivicdata_personcontactdetail"
class PersonLink(LinkBase):
"""
URL for a document about a Person.
"""
person = models.ForeignKey(
Person,
related_name="links",
on_delete=models.CASCADE,
help_text="A reference to the Person connected to this link.",
)
class Meta:
db_table = "opencivicdata_personlink"
class PersonSource(LinkBase):
"""
Source used in assembling a Person.
"""
person = models.ForeignKey(
Person,
related_name="sources",
on_delete=models.CASCADE,
help_text="A link to the Person connected to this source.",
)
class Meta:
db_table = "opencivicdata_personsource"
class Membership(OCDBase):
"""
A relationship between a Person and an Organization, possibly including a Post.
"""
id = OCDIDField(ocd_type="membership")
organization = models.ForeignKey(
Organization,
related_name="memberships",
# memberships will go away if the org does
on_delete=models.CASCADE,
help_text="A link to the Organization in which the Person is a member.",
)
person = models.ForeignKey(
Person,
related_name="memberships",
null=True,
# Membership will just unlink if the person goes away
on_delete=models.SET_NULL,
help_text="A link to the Person that is a member of the Organization.",
)
person_name = models.CharField(
max_length=300,
blank=True,
default="",
help_text="The name of the Person that is a member of the Organization.",
)
post = models.ForeignKey(
Post,
related_name="memberships",
null=True,
# Membership will just unlink if the post goes away
on_delete=models.SET_NULL,
help_text=" The Post held by the | |
number.
:type i: int
:returns: subsample satisfying the testing condition.
:rtype: pandas.DataFrame
'''
return smp[self._true_cond(smp, i)]
def train_sample( self, smp, i, weights = None ):
'''
:param smp: input sample.
:type smp: pandas.DataFrame
:param i: fold number.
:type i: int
:param weights: possible name of the column holding the weights.
:type weights: str or None
:returns: subsample satisfying the training condition.
:rtype: pandas.DataFrame
'''
c = self._false_cond(smp, i)
out = smp[c]
if weights is None:
sw = None
else:
sw = self._handle_weights(out, weights)
return out, sw
class StdMVAmgr(MVAmgr):
def __init__( self, classifier, features, sigtrainfrac = 0.75, bkgtrainfrac = 0.75 ):
'''
Manager that uses the standard procedure of splitting the
samples given the training and testing fractions.
:param sigtrainfrac: fraction of signal events used for training.
:type sigtrainfrac: float
:param bkgtrainfrac: fraction of background events used for training.
:type bkgtrainfrac: float
.. seealso:: :meth:`MVAmgr.__init__`.
'''
MVAmgr.__init__(self, classifier, features )
self.sigtrainfrac = sigtrainfrac
self.bkgtrainfrac = bkgtrainfrac
def apply( self, sample ):
'''
Calculate the MVA method values for the given sample.
:param sample: input sample to apply the MVA method.
:type sample: pandas.DataFrame
:returns: output of the probability and prediction functions.
:rtype: pandas.DataFrame, pandas.DataFrame
.. seealso:: :meth:`MVAmgr.apply`.
'''
return self._process(self.mva, sample[self.features])
def fit( self, sig, bkg, is_sig, weights = None ):
'''
Fit the MVA classifier to the given sample.
:param sig: signal sample.
:type sig: pandas.DataFrame
:param bkg: background sample.
:type bkg: pandas.DataFrame
:param is_sig: signal flag.
:type is_sig: str
:param weights: possible name of the column holding the weights.
:type weights: str or None
:returns: training and testing data samples.
:rtype: tuple(pandas.DataFrame, pandas.DataFrame)
.. seealso:: :meth:`MVAmgr.fit`.
'''
info('Divide data in train and test samples')
info('Signal train fraction: {}'.format(self.sigtrainfrac), indent=1)
train_sig, test_sig = train_test_split(sig, train_size=self.sigtrainfrac)
info('Background train fraction: {}'.format(self.bkgtrainfrac), indent=1)
train_bkg, test_bkg = train_test_split(bkg, train_size=self.bkgtrainfrac)
if weights is not None:
train_sig_wgts = self._handle_weights(train_sig, weights)
train_bkg_wgts = self._handle_weights(train_bkg, weights)
test_sig_wgts = self._handle_weights(test_sig, weights)
test_bkg_wgts = self._handle_weights(test_bkg, weights)
train_wgts = pandas.concat([train_sig_wgts, train_bkg_wgts], ignore_index=True, sort=False)
test_wgts = pandas.concat([test_sig_wgts, test_bkg_wgts], ignore_index=True, sort=False)
else:
train_wgts = None
test_wgts = None
info('Merging training and test samples')
train_data = pandas.concat([train_sig, train_bkg], ignore_index=True, sort=False)
test_data = pandas.concat([test_sig, test_bkg], ignore_index=True, sort=False)
self.mva = deepcopy(self._fit(train_data, is_sig, train_wgts))
if weights is not None:
train_data[weights] = train_wgts
test_data[weights] = test_wgts
return train_data, test_data
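# Minimal usage sketch (the classifier choice and feature names are assumptions,
# not part of this module):
#
#   from sklearn.ensemble import GradientBoostingClassifier
#   mgr = StdMVAmgr(GradientBoostingClassifier(), features=['feat1', 'feat2'],
#                   sigtrainfrac=0.7, bkgtrainfrac=0.7)
#   train, test = mgr.fit(sig_df, bkg_df, is_sig='is_sig')
#   proba, pred = mgr.apply(test)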
def _do_mva_study( sigsmp, bkgsmp, cfg, outdir, weights, is_sig ):
'''
Do an MVA study.
:param sigsmp: signal sample.
:type sigsmp: pandas.DataFrame
:param bkgsmp: background sample.
:type bkgsmp: pandas.DataFrame
:param cfg: configurable for the MVA manager.
:type cfg: ConfMgr or dict
:param outdir: output directory. By default is set to "mva_outputs". \
The full output directory is actually determined from the configuration \
ID of the study so, assuming the default value, it would be under \
"mva_outputs/mva_<configuration ID>".
:type outdir: str
:param weights: name of the column representing the weights of the \
samples.
:type weights: str or None
:param is_sig: name for the additional column holding the \
signal condition.
:type is_sig: str
'''
# Create the output directory
_aux._makedirs(outdir)
cfg_path = os.path.join(outdir, 'config.xml')
# Path to the file storing the MVA function
func_path = os.path.join(outdir, 'func.pkl')
cfg['funcfile'] = func_path
# Generating the XML file must be the last thing to do
cfg.save(cfg_path)
# Display the configuration to run
print('''\
*************************
*** MVA configuration ***
*************************
{}
*************************\
'''.format(cfg), flush=True)
# Add the signal flag
info('Adding the signal flag')
sigsmp = sigsmp.copy()
sigsmp[is_sig] = __sig_flag__
bkgsmp = bkgsmp.copy()
bkgsmp[is_sig] = __bkg_flag__
# Build the MVA manager
mgr = cfg.proc_conf()[__manager_name__]
# Train the MVA method
info('Initialize training')
train, test = mgr.fit(sigsmp, bkgsmp, is_sig, weights)
# Save the output method(s)
mgr.save(func_path)
# Apply the MVA method
info('Apply the trained MVA algorithm')
for tp, smp in (('train', train), ('test', test)):
d, p = mgr.apply_for_overtraining(tp, smp)
smp[__mva_proba__] = d
smp[__mva_pred__] = p
info('Process finished!')
return mgr, train, test
def mva_study( signame, sigsmp, bkgname, bkgsmp, cfg,
outdir = 'mva_outputs',
weights = None,
is_sig = __is_sig__,
overwrite_if_exists = False,
return_dir = False,
extra_cfg = None
):
'''
Main function to perform an MVA study. The results are stored
in three different files: one storing the histograms and the
ROC curve, another storing the configuration used to run this
function, and the last storing the object that holds the trained
MVA algorithm.
:param signame: signal sample name.
:type signame: str
:param sigsmp: signal sample.
:type sigsmp: pandas.DataFrame
:param bkgname: background sample name.
:type bkgname: str
:param bkgsmp: background sample.
:type bkgsmp: pandas.DataFrame
:param cfg: configurable for the MVA manager.
:type cfg: ConfMgr or dict
:param outdir: output directory. By default is set to "mva_outputs". \
The full output directory is actually determined from the configuration \
ID of the study so, assuming the default value, it would be under \
"mva_outputs/mva_<configuration ID>".
:type outdir: str
:param weights: name of the column representing the weights of the \
samples.
:type weights: str or None
:param is_sig: name for the additional column holding the \
signal condition.
:type is_sig: str
:param overwrite_if_exists: if set to True, then the output directory will \
be recreated if it exists. Otherwise, it raises RuntimeError.
:type overwrite_if_exists: bool
:param return_dir: if set to True, the directory where the outputs are \
saved is also returned.
:type return_dir: bool
:param extra_cfg: additional configuration to be stored with the main manager.
:type extra_cfg: dict
:returns: MVA manager, training and testing samples; it might also return \
the directory where the outputs are saved.
:rtype: tuple(MVAmgr, pandas.DataFrame, pandas.DataFrame (, str))
:raises RuntimeError: if "overwrite_if_exists = False" and the output \
directory already exists, or if an attempt is made to overwrite a file \
with the name of the directory.
'''
cfg = _preprocess_study_args(signame, bkgname, cfg, outdir, weights, extra_cfg)
if os.path.exists(outdir):
if overwrite_if_exists:
if not os.path.isdir(outdir):
raise RuntimeError('Attempt to overwrite a file with the name of the output directory')
shutil.rmtree(outdir)
else:
raise RuntimeError('Output directory already exists; run with "overwrite_if_exists=True" to overwrite it')
robjs = _do_mva_study(sigsmp, bkgsmp, cfg, outdir, weights, is_sig)
if return_dir:
robjs += (outdir,)
return robjs
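# Illustrative call (sample names, weights column and output directory are made up;
# "cfg" is a ConfMgr/dict configuring the MVA manager as described above):
#
#   mgr, train, test = mva_study('signal_mc', sig_df, 'background_mc', bkg_df, cfg,
#                                outdir='mva_outputs/test_run',
#                                weights='weight',
#                                overwrite_if_exists=True)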
def mva_study_with_id( signame, sigsmp, bkgname, bkgsmp, cfg,
outdir = 'mva_outputs',
weights = None,
is_sig = __is_sig__,
raise_if_matches = False,
return_dir = False,
return_cid = False,
extra_cfg = None,
):
'''
Main function to perform an MVA study. The results are stored
in three different files: one storing the histograms and the
ROC curve, another storing the configuration used to run this
function, and the last storing the object that holds the trained
MVA algorithm.
:param signame: signal sample name.
:type signame: str
:param sigsmp: signal sample.
:type sigsmp: pandas.DataFrame
:param bkgname: background sample name.
:type bkgname: str
:param bkgsmp: background sample.
:type bkgsmp: pandas.DataFrame
:param cfg: configurable for the MVA manager.
:type cfg: ConfMgr or dict
:param outdir: output directory. By default is set to "mva_outputs". \
The full output directory is actually determined from the configuration \
ID of the study so, assuming the default value, it would be under \
"mva_outputs/mva_<configuration ID>".
:type outdir: str
:param weights: name of the column representing the weights of the \
samples.
:type weights: str or None
:param is_sig: name for the additional column holding the \
signal condition.
:type is_sig: str
:param raise_if_matches: if set to True, a LookupError will be raised \
if a configuration matching the input is found. This is useful when \
running many configurations, for example to skip those that have \
already been studied.
:type raise_if_matches: bool
:param return_dir: if set to True, the directory where the outputs are \
saved is also returned.
:type return_dir: bool
:param return_cid: if set to True, return also the configuration ID.
:type return_cid: bool
:param extra_cfg: additional configuration to be stored with the main manager.
:type extra_cfg: dict
:returns: MVA manager, training and testing samples; it might also return \
the directory where the outputs are saved and the configuration ID.
# File: models/deep_ar.py (repository: kitteltom/probabilistic-energy-forecasting)
import numpy as np
import datetime as dt
import time
from tqdm import tqdm
import os
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from models.forecast_model import ForecastModel
from distributions.empirical import Empirical
import utils
class DeepAR(nn.Module, ForecastModel, Empirical):
"""
Implements the global probabilistic forecasting model called DeepAR (Salinas et. al., 2020).
"""
def __init__(
self, y, t, u=None, ID='', seed=0,
prediction_length=192,
num_samples=100,
embedding_dim=None,
num_layers=3,
num_cells=40,
epochs=50,
batch_size=512
):
nn.Module.__init__(self)
ForecastModel.__init__(self, y, t, u, ID, seed=seed, global_model=True)
# Fix the seed
torch.manual_seed(seed)
# Set the device
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Running on {self.device}.')
self.seq_len = self.s_w + prediction_length
self.seq_delta = self.s_d
self.num_samples = num_samples
self.embedding_dim = embedding_dim if embedding_dim is not None else int(np.sqrt(self.n))
self.num_layers = num_layers
self.num_cells = num_cells
self.epochs = epochs
self.batch_size = batch_size
self.lags_seq = [1, 2, 3, self.s_d - 1, self.s_d, self.s_d + 1, self.s_w - 1, self.s_w, self.s_w + 1]
num_features = 6 + len(self.lags_seq) + self.embedding_dim
if u is not None:
num_features += u.shape[1]
self.embedding = nn.Embedding(self.n, self.embedding_dim)
self.lstm = nn.LSTM(
input_size=num_features,
hidden_size=num_cells,
num_layers=num_layers,
batch_first=True,
# dropout=0.1
)
self.mu_fn = nn.Sequential(
nn.Linear(num_cells, 1)
)
self.sigma2_fn = nn.Sequential(
nn.Linear(num_cells, 1),
nn.Softplus()
)
self.loss_fn = nn.GaussianNLLLoss()
self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
self.to(self.device)
self.X_mean = 0
self.X_std = 1
self.samples_y = np.zeros((self.num_samples, 0, self.n))
# for i in range(self.n):
# self.results[i]['samples_y'] = []
# Load a trained model if applicable
self.model_path = os.path.join(self.get_out_dir(), '_state_dict_' + self.results[0]["ID"])
if os.path.exists(self.model_path):
self.create_features(self.t, self.u, fit=True)
self.load_state_dict(torch.load(self.model_path, map_location=self.device))
def __str__(self):
return f'DeepAR{self.seed}'
def forward(self, X, h=None):
"""
Forward function of the RNN. Includes an embedding, a LSTM and two separate affine output layers for the
distribution parameters.
"""
embeds = self.embedding(X[:, :, -1].int())
X = torch.cat([X[:, :, :-1], embeds], dim=2)
lstm_out, h = self.lstm(X, h)
lstm_out = lstm_out.reshape(-1, self.num_cells)
return self.mu_fn(lstm_out), self.sigma2_fn(lstm_out), h
def create_features(self, t, u=None, fit=False):
"""
Creates a standardized feature vector consisting of time features and optionally covariates u.
Note that fit must be set to True during training.
"""
seconds = t.map(dt.datetime.timestamp).to_numpy(float)
day = 24 * 60 * 60
cos_d = np.cos((2 * np.pi / day) * seconds)
sin_d = np.sin((2 * np.pi / day) * seconds)
week = day * 7
cos_w = np.cos((2 * np.pi / week) * seconds)
sin_w = np.sin((2 * np.pi / week) * seconds)
year = day * 365.2425
cos_y = np.cos((2 * np.pi / year) * seconds)
sin_y = np.sin((2 * np.pi / year) * seconds)
X = np.vstack([cos_d, sin_d, cos_w, sin_w, cos_y, sin_y]).T
if u is not None:
X = np.hstack([X, u])
if fit:
self.X_mean = np.mean(X, axis=0, keepdims=True)
self.X_std = np.std(X, axis=0, keepdims=True)
X = utils.standardize(X, self.X_mean, self.X_std)
return self.tensor(X)
def create_labels(self, y):
"""
Creates rescaled log observations as labels for training.
"""
y = utils.interpolate_nans(y)
return self.tensor(np.log(y / self.y_mean).T[..., np.newaxis])
def create_input(self, t, u=None, y_lags=(), categories=None, fit=False, samples=False):
"""
Creates the input vector for the RNN consisting of time features, covariates, lagged log observations
and time series dependent categorical features.
"""
X = self.create_features(t, u, fit=fit)
if samples:
X = X.repeat(self.num_samples, 1, 1)
else:
X = X.repeat(self.n, 1, 1)
for y_lag in y_lags:
X = torch.cat([X, y_lag], dim=2)
categories = torch.unsqueeze(
categories.repeat(len(t), self.num_samples if samples else 1).T,
dim=2
)
X = torch.cat([X, categories], dim=2)
return X
def to_sequence(self, x):
"""
Creates training sequences of length self.seq_len by reshaping the array of input vectors x.
"""
num_seq_per_series = (x.shape[1] - self.seq_len + self.seq_delta) // self.seq_delta
seq = torch.zeros(self.n, num_seq_per_series, self.seq_len, x.shape[2])
for i in range(num_seq_per_series):
seq[:, i, :, :] = x[:, i * self.seq_delta:i * self.seq_delta + self.seq_len, :]
return seq.reshape(-1, self.seq_len, x.shape[2])
@staticmethod
def tensor(x):
"""
Numpy array -> torch tensor.
"""
return torch.from_numpy(x).float()
@staticmethod
def numpy(x):
"""
Torch tensor -> numpy array.
"""
return x.cpu().detach().numpy().astype(float).squeeze()
def train_val_split(self):
"""
Splits the data into 20% validation and 80% training set for early stopping.
"""
split = int((len(self.t) * 0.2) // self.seq_delta) * self.seq_delta
y_train, y_val = self.y[split:], self.y[:split]
t_train, t_val = self.t[split:], self.t[:split]
if self.u is not None:
u_train, u_val = self.u[split:], self.u[:split]
else:
u_train, u_val = None, None
return y_train, y_val, t_train, t_val, u_train, u_val
def get_data_loader(self, y, t, u, fit=False):
"""
Returns a data loader for the observations y, timestamps t and covariates u. The functions creates
labels and input vectors, transforms them to sequences and then converts them into a TensorDataset.
"""
y = self.create_labels(y)
y_lags = []
for lag in self.lags_seq:
y_lags.append(torch.hstack([y[:, :lag], y[:, :-lag]]))
X = self.create_input(
t,
u,
categories=self.tensor(np.arange(self.n)),
y_lags=y_lags,
fit=fit
)
data = TensorDataset(
self.to_sequence(X),
self.to_sequence(y)
)
return DataLoader(data, batch_size=self.batch_size, shuffle=fit)
def fit(self):
"""
Trains the DeepAR model using backprop with the ADAM optimizer and Gaussian negative log likelihood loss.
Performs early stopping by evaluating the loss on the validation set.
"""
super().fit()
start_time = time.time()
y_train, y_val, t_train, t_val, u_train, u_val = self.train_val_split()
train_dataloader = self.get_data_loader(y_train, t_train, u_train, fit=True)
val_dataloader = self.get_data_loader(y_val, t_val, u_val, fit=False)
train_loss = np.zeros(self.epochs)
val_loss = np.zeros(self.epochs)
        best_val_loss = np.inf  # start above any achievable loss so the first epoch can be saved
for epoch in range(self.epochs):
# Train mode
self.train()
batch_cnt = 1
with tqdm(train_dataloader, miniters=int(np.sqrt(len(train_dataloader)))) as batches:
for X, y in batches:
X, y = X.to(self.device), y.to(self.device)
batches.set_description(f'Epoch {epoch + 1:>2}', refresh=False)
# Forward pass
mu_y, sigma2_y, _ = self(X)
loss = self.loss_fn(mu_y, y.reshape(-1, 1), sigma2_y)
train_loss[epoch] += (loss.item() - train_loss[epoch]) / batch_cnt
batch_cnt += 1
# Backprop
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batches.set_postfix(loss=train_loss[epoch], refresh=False)
val_loss[epoch] = self.val(val_dataloader)
# Early stopping
if val_loss[epoch] < best_val_loss:
best_val_loss = val_loss[epoch]
# Save the trained model
torch.save(self.state_dict(), self.model_path)
# After training, load the best model
self.load_state_dict(torch.load(self.model_path, map_location=self.device))
fit_time = time.time() - start_time
for i in range(self.n):
self.results[i]['fit_time'] = fit_time / self.n
self.results[i]['train_loss'] = train_loss.tolist()
self.results[i]['val_loss'] = val_loss.tolist()
def val(self, val_dataloader):
"""
Computes the validation loss.
"""
# Eval mode
self.eval()
val_loss = 0
with torch.no_grad():
for X, y in val_dataloader:
X, y = X.to(self.device), y.to(self.device)
# Forward pass
mu_y, sigma2_y, _ = self(X)
loss = self.loss_fn(mu_y, y.reshape(-1, 1), sigma2_y)
val_loss += loss.item()
return val_loss / len(val_dataloader)
def sample_per_sample(self, h, y, t, u):
"""
Computes forecast sample paths by recursively sampling from a Gaussian distribution with the forecast
distribution parameters. Here, sample paths are generated for all time series in parallel. Sample path
after sample path.
"""
prediction_length = len(t)
samples_y = np.zeros((self.num_samples, prediction_length, self.n))
for sample in tqdm(range(self.num_samples), miniters=int(np.sqrt(self.num_samples))):
h_tilde = (
h[0].detach().clone(),
h[1].detach().clone()
)
y_tilde = y.detach().clone()
for step in range(prediction_length):
y_lags = []
for lag in self.lags_seq:
y_lags.append(torch.unsqueeze(y_tilde[:, -lag], dim=1))
X = self.create_input(
t[step:step + 1],
u[step:step + 1] if u is not None else None,
categories=self.tensor(np.arange(self.n)),
y_lags=y_lags
)
mu_y, sigma2_y, h_tilde = self(X.to(self.device), h_tilde)
y_tilde = torch.hstack([
y_tilde,
torch.unsqueeze(torch.normal(mu_y, torch.sqrt(sigma2_y)), dim=1).cpu()
])
samples_y[sample] = np.exp(self.numpy(y_tilde[:, -prediction_length:]).T) * self.y_mean[np.newaxis]
return samples_y
def sample_per_time_series(self, h, y, t, u):
"""
Computes forecast sample paths by recursively sampling from a Gaussian distribution with the forecast
distribution parameters. Here, the different sample paths are generated in parallel. Time series after
time series.
"""
prediction_length = len(t)
samples_y = np.zeros((self.num_samples, prediction_length, self.n))
for i in tqdm(range(self.n), miniters=int(np.sqrt(self.n))):
h_tilde = (
h[0][:, i:i + 1].repeat(1, self.num_samples, 1),
h[1][:, i:i + 1].repeat(1, self.num_samples, 1)
)
y_tilde = y[i, -self.lags_seq[-1]:].repeat(self.num_samples, 1, 1)
for step in range(prediction_length):
y_lags = []
for lag in self.lags_seq:
y_lags.append(torch.unsqueeze(y_tilde[:, -lag], dim=1))
X = self.create_input(
t[step:step + 1],
u[step:step + 1] if u is not None else None,
categories=self.tensor(np.array(i)),
y_lags=y_lags,
samples=True
)
mu_y, sigma2_y, h_tilde = self(X.to(self.device), h_tilde)
y_tilde = torch.hstack([
y_tilde,
torch.unsqueeze(torch.normal(mu_y, torch.sqrt(sigma2_y)), dim=1).cpu()
])
samples_y[:, :, i] = np.exp(self.numpy(y_tilde[:, -prediction_length:])) * self.y_mean[i]
return samples_y
def predict(self, t, u=None):
"""
Predicts the distribution of observations y by recursively computing sample paths for the timestamps t,
optionally given covariates u.
"""
if super().predict(t, u):
return
start_time = time.time()
conditioning_length = self.seq_len - len(t)
y = self.create_labels(self.y[-(conditioning_length + self.lags_seq[-1]):])
y_lags = []
for lag in self.lags_seq:
y_lags.append(y[:, -(conditioning_length + lag):-lag])
X = self.create_input(
self.t[-conditioning_length:],
self.u[-conditioning_length:] if u is not None else None,
categories=self.tensor(np.arange(self.n)),
y_lags=y_lags
)
# Eval mode
self.eval()
with torch.no_grad():
            _, _, h = self(X.to(self.device))
# File: root/filter/image_filter.py
#!/usr/bin/python
import imageio
import matplotlib.pyplot as plt
import numpy as np
from root.util import ImageUtil as util
from PIL import Image
_MIN_PIXEL = 0
_MAX_PIXEL = 255
class ImageFilter():
    @staticmethod
    def isGrayScale(img):
        return len(img.shape) == 2
    @staticmethod
    def isRGB(img):
        return len(img.shape) == 3
@staticmethod
def read_image(image_path, type="RGB"):
return imageio.imread(image_path, as_gray=False, pilmode=type)
@staticmethod
def save_image(name, image_as_byte):
imageio.imwrite(name, image_as_byte)
@staticmethod
def normalize_image(img):
min_input = img.min()
max_input = img.max()
min_output = _MIN_PIXEL
max_output = _MAX_PIXEL
        # scale to the output range first, then add the output offset
        return (img - min_input) * ((max_output - min_output) / (max_input - min_input)) + min_output
@staticmethod
def apply_negative(img):
if len(img.shape) == 2:
return _MAX_PIXEL - img
else:
i = img.copy()
i[:,:,0] = _MAX_PIXEL - img[:,:,0]
i[:,:,1]= _MAX_PIXEL - img[:,:,1]
i[:,:,2] = _MAX_PIXEL - img[:,:,2]
return i
@staticmethod
    def apply_logarithmic(img, c=0):
        max_obtained = np.max(img)
        if c == 0:
            # default scaling maps the brightest input value to _MAX_PIXEL
            c = _MAX_PIXEL / np.log(1 + max_obtained)
        log_img = c * np.log(img.astype(np.double) + 1)
        return log_img.astype(np.uint8)
@staticmethod
def apply_gamma_correction(img, gamma):
c = _MAX_PIXEL / (1+ _MAX_PIXEL)**gamma
gamma_correction = c * (img**gamma)
return gamma_correction
@staticmethod
def draw_histogram(img, img_name, color="black"):
data = img.flatten()
plt.hist(data, _MAX_PIXEL + 1, [0, 256], color=color, ec=color)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Pixel value')
plt.ylabel('Amount')
plt.savefig(img_name)
plt.close()
@staticmethod
def histogram(image, bins=256):
if len(image.shape) == 2: # Grayscale Image
            hist = np.zeros(bins, dtype=int)
flat = np.asarray(image)
flat = flat.flatten()
for pxl in flat:
hist[int(round(pxl,5))] += 1
return hist
else: # RGB Image
r, g, b = np.zeros(bins), np.zeros(bins), np.zeros(bins)
for row in range(image.shape[0]):
for col in range(image.shape[1]):
r[image[row, col][0]] += 1
g[image[row, col][1]] += 1
b[image[row, col][2]] += 1
return (r, g, b)
@staticmethod
def apply_histogram_equalization(img):
# Getting the pixel values of the image
original = np.array(img)
# Creating a new matrix for the image
equalized_img = np.copy(original)
# Getting unique pixels and frequency of the values from the image
unique_pixels, pixels_frequency = np.unique(
original, return_counts=True)
# Image pixels divided by the size of the image
pk = pixels_frequency / img.size
pk_length = len(pk)
# Getting the cummulative frequency of the unique pixel values
sk = np.cumsum(pk)
# Multiplying the cummulative frequency by the maximum value of the pixels
mul = sk * np.max(original)
roundVal = np.round(mul)
if len(img.shape) == 2:
# Mapping the pixels for the equalization
for i in range(len(original)):
for j in range(len(original[0])):
equalized_img[i][j] = roundVal[np.where(
unique_pixels == original[i][j])]
else:
R = ImageFilter.apply_histogram_equalization(img[:,:,0])
G = ImageFilter.apply_histogram_equalization(img[:,:,1])
B = ImageFilter.apply_histogram_equalization(img[:,:,2])
output = np.zeros((R.shape[0], R.shape[1], 3), dtype=np.uint8)
output[:,:,0] = R
output[:,:,1] = G
output[:,:,2] = B
equalized_img = output
return equalized_img
# @staticmethod
# def equalize_hist(image, hist):
# if len(image.shape) == 2: # Grayscale Image
# hist = histogram(image)
# x = iter(hist)
# y = [next(x)]
# for i in x:
# y.append(y[-1] + i)
# y = np.array(y)
# y = ((y - y.min()) * 255) / (y.max() - y.min())
# y = y.astype(np.uint8)
# cdf = y
# img = (np.asarray(image)).flatten()
# flat = np.zeros_like(img, dtype=np.uint8)
# for i in range(len(flat)):
# flat[i] = int(round(img[i],5))
# output = cdf[flat]
# output = np.reshape(output, image.shape)
# output[np.where(output > MAX_PIXEL)] = MAX_PIXEL
# return output.astype(np.uint8)
# else:
# R = ImageFilter.equalize_hist(hist)
# G = ImageFilter.equalize_hist(hist)
# B = ImageFilter.equalize_hist(hist)
# output = np.zeros((R.shape[0], R.shape[1], 3), dtype=np.uint8)
# output[:,:,0] = R
# output[:,:,1] = G
# output[:,:,2] = B
# obtained = output
# return obtained
@staticmethod
    def __get_neighbors_matrix(filter_size, i, j, data):
        # collect the filter_size x filter_size neighbourhood centred on (i, j),
        # padding with zeros wherever the window falls outside the image
        mid_position = filter_size // 2
        neighbors = []
        for z in range(filter_size):
            for k in range(filter_size):
                row = i + z - mid_position
                col = j + k - mid_position
                if 0 <= row < len(data) and 0 <= col < len(data[0]):
                    neighbors.append(data[row][col])
                else:
                    neighbors.append(0)
        return neighbors
@staticmethod
def get_median(filter_size, i, j, data):
mid_position = filter_size // 2
neighbors = ImageFilter.__get_neighbors_matrix(filter_size, i, j, data)
neighbors.sort()
return neighbors[len(neighbors) // 2]
@staticmethod
def apply_median(img, filter_size):
filter_size = util.format_filter_size(filter_size)
obtained, original = util.get_empty_image_with_same_dimensions(
img)
if len(img.shape) == 2:
for i in range(len(original)):
for j in range(len(original[0])):
obtained[i][j] = ImageFilter.get_median(
filter_size, i, j, original)
else:
R = ImageFilter.apply_median(img[:,:,0],filter_size)
G = ImageFilter.apply_median(img[:,:,1],filter_size)
B = ImageFilter.apply_median(img[:,:,2],filter_size)
output = np.zeros((R.shape[0], R.shape[1], 3), dtype=np.uint8)
output[:,:,0] = R
output[:,:,1] = G
output[:,:,2] = B
obtained = output
return obtained
@staticmethod
def apply_piecewise_linear(img, coordinates_x, coordinates_y):
"""Apply Piecewise Linear filter on an image basead on an group of coordinates.
Parameters
----------
img : numpy array
The target image where the filter would be applied
coordinates_x : array
The coordinates X from all points to the interpolated already in the desired order.
coordinates_y : array
The coordinates Y from all points to the interpolated already in the desired order.
Returns
-------
numpy array
an array representing the obtained image after apply the filter
"""
x = np.array(range(0, _MAX_PIXEL + 1), dtype=np.uint8)
interp = np.interp(x, coordinates_x, coordinates_y)
obtained = img.copy()
height, width = util.get_dimensions(obtained)
if len(img.shape) == 2:
for i in range(height):
for j in range(width):
index = int(np.round(obtained[i][j]))
obtained[i][j] = interp[index]
else:
R = ImageFilter.apply_piecewise_linear(img[:,:,0],coordinates_x, coordinates_y)
G = ImageFilter.apply_piecewise_linear(img[:,:,1],coordinates_x, coordinates_y)
B = ImageFilter.apply_piecewise_linear(img[:,:,2],coordinates_x, coordinates_y)
output = np.zeros((R.shape[0], R.shape[1], 3), dtype=np.uint8)
output[:,:,0] = R
output[:,:,1] = G
output[:,:,2] = B
obtained = output
return obtained
@staticmethod
def apply_convolution(image, kernel):
if len(image.shape) == 2:
image_padded = np.zeros((image.shape[0] + 2, image.shape[1] + 2))
image_padded[1:-1, 1:-1] = image
out = np.zeros_like(image)
for x in range(image.shape[1]):
for y in range(image.shape[0]):
out[y, x] = (kernel * image_padded[y:y + 3, x:x + 3]).sum()
else:
R = ImageFilter.apply_convolution(image[:,:,0],kernel)
G = ImageFilter.apply_convolution(image[:,:,1],kernel)
B = ImageFilter.apply_convolution(image[:,:,2],kernel)
output = np.zeros((R.shape[0], R.shape[1], 3), dtype=np.uint8)
output[:,:,0] = R
output[:,:,1] = G
output[:,:,2] = B
out = output
return out
# @staticmethod
# def apply_convolution(img, filter_matrix):
# obtained, original = util.get_empty_image_with_same_dimensions(
# img)
# height, width = util.get_dimensions(img)
# #Se for grayscale
# if len(img.shape) == 2:
# for row in range(1, height - 1):
# for col in range(1, width - 1):
# value = filter_matrix * \
# img[(row - 1):(row + 2), (col - 1):(col + 2)]
# max_obtained_value = max(0, value.sum())
# obtained[row, col] = min(max_obtained_value, _MAX_PIXEL)
# else:
# channel = img.shape[2]
# for c in range (channel):
# for row in range(1, height - 1):
# for col in range(1, width - 1):
# value = filter_matrix * \
# img[(row - 1):(row + 2), (col - 1):(col + 2),c]
# max_obtained_value = max(0, value.sum())
# obtained[row, col,c] = min(max_obtained_value, _MAX_PIXEL)
# return obtained
@staticmethod
def apply_laplacian(img):
kernel = np.array([
[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
obtained = ImageFilter.apply_convolution(img, kernel)
norm_obtained = util.normalize_image(obtained)
sharpened = img + norm_obtained
norm_sharpened = util.normalize_image(sharpened)
return norm_obtained, norm_sharpened
@staticmethod
def create_gaussian_kernel(filter_size, sigma):
"""
Creates a 2D gaussian kernel using filter_size and sigma
"""
filter_size = util.format_filter_size(filter_size)
ax = np.linspace(-(filter_size - 1) / 2.,
(filter_size - 1) / 2., filter_size)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(xx) +
np.square(yy)) / np.square(sigma))
return kernel / np.sum(kernel)
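    # Illustrative note (an assumption, not from the original source): the kernel is
    # normalized, so e.g. ImageFilter.create_gaussian_kernel(3, 1.0) is a symmetric
    # 3x3 matrix that peaks at the centre and sums to 1, which keeps the overall
    # image brightness unchanged when used with apply_convolution.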
@staticmethod
def apply_gaussian(img, filter_size=3, sigma=1.):
kernel = ImageFilter.create_gaussian_kernel(filter_size, sigma)
return ImageFilter.apply_convolution(img, kernel)
@staticmethod
def apply_sobel(img):
# Horizontal sobel matrix
horizontal = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
# Vertical sobel matrix
vertical = np.array([
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]])
height, width = util.get_dimensions(img)
# define images with 0s
new_horizontal_image = np.zeros((height, width), np.uint8)
new_vertical_image = np.zeros((height, width), np.uint8)
new_gradient_image = np.zeros((height, width), np.uint8)
if len(img.shape) == 2:
# # define images with 0s
# new_horizontal_image = np.zeros((height, width), np.uint8)
# new_vertical_image = np.zeros((height, width), np.uint8)
# new_gradient_image = np.zeros((height, width), np.uint8)
for i in range(1, height - 1):
for j in range(1, width - 1):
horizontal_grad = ImageFilter.apply_gradient_core(
horizontal, img, i, j)
new_horizontal_image[i - 1, j - 1] = abs(horizontal_grad)
vertical_grad = ImageFilter.apply_gradient_core(
vertical, img, i, j)
new_vertical_image[i - 1, j - 1] = abs(vertical_grad)
# Edge Magnitude
                    new_gradient_image[i - 1, j - 1] = min(
                        np.sqrt(horizontal_grad ** 2 + vertical_grad ** 2), _MAX_PIXEL)
"""Functions to calculate the quidel sensor statistic."""
import numpy as np
import pandas as pd
def _prop_var(p, n):
"""
Calculate variance of proportion.
var(X/n) = 1/(n^2)var(X) = (npq)/(n^2) = pq/n
"""
return p * (1 - p) / n
def fill_dates(y_data, first_date, last_date):
"""
Ensure all dates are listed in the data, otherwise, add days with 0 counts.
Args:
y_data: dataframe with datetime index
first_date: datetime.datetime
first date to be included
last_date: datetime.datetime
last date to be inclluded
Returns: dataframe containing all dates given
"""
cols = y_data.columns
if first_date not in y_data.index:
y_data = y_data.append(pd.DataFrame(dict.fromkeys(cols, 0.),
columns=cols, index=[first_date]))
if last_date not in y_data.index:
y_data = y_data.append(pd.DataFrame(dict.fromkeys(cols, 0.),
columns=cols, index=[last_date]))
y_data.sort_index(inplace=True)
y_data = y_data.asfreq('D', fill_value=0)
y_data.fillna(0, inplace=True)
return y_data
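# Illustrative sketch (not in the original source): a missing calendar day between
# first_date and last_date comes back as a row of zeros.
def _fill_dates_example():
    idx = pd.to_datetime(['2020-05-01', '2020-05-03'])
    y = pd.DataFrame({'positives': [3, 5]}, index=idx)
    # 2020-05-02 is absent in the input and is filled with 0; 2020-05-04 is appended
    return fill_dates(y, pd.Timestamp('2020-05-01'), pd.Timestamp('2020-05-04'))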
def _slide_window_sum(arr, k):
"""
Sliding window sum, with fixed window size k.
For indices 0:k, we DO compute a sum, using whatever points are available.
Reference: https://stackoverflow.com/a/38507725
Args:
arr: np.ndarray
Array over which to calculate sliding window sum
k: int
Window size
Returns:
sarr: np.ndarray
Array of same length of arr, holding the sliding window sum.
"""
if not isinstance(k, int):
raise ValueError('k must be int.')
temp = np.append(np.zeros(k - 1), arr)
sarr = np.convolve(temp, np.ones(k, dtype=int), 'valid')
return sarr
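# Illustrative sketch (not in the original source): shows the boundary behaviour of
# _slide_window_sum, i.e. the first k-1 entries are partial sums over whatever
# points are available so far.
def _slide_window_sum_example():
    arr = np.array([1., 2., 3., 4., 5.])
    # with k=3 this returns [1., 3., 6., 9., 12.]
    return _slide_window_sum(arr, 3)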
def _geographical_pooling(tpooled_tests, tpooled_ptests, min_obs, max_borrow_obs):
"""
Calculate proportion of parent samples (tests) that must be "borrowed" to compute the statistic.
If there are no samples available in the parent, the borrow_prop is 0. If the parent does not
have enough samples, we return a borrow_prop of 1, and the fact that the
pooled samples are insufficient are handled in the statistic fitting step.
Args:
tpooled_tests: np.ndarray[float]
Number of tests after temporal pooling.
There should be no np.nan here.
tpooled_ptests: np.ndarray[float]
Number of parent tests after temporal pooling.
There should be no np.nan here.
min_obs: int
Minimum number of observations in order to compute a ratio
max_borrow_obs: int
Maximum number of observations that can be borrowed in geographical pooling
Returns:
np.ndarray[float]
Same length as tests; proportion of parent observations to borrow.
"""
if (np.any(np.isnan(tpooled_tests)) or np.any(np.isnan(tpooled_ptests))):
print(tpooled_tests)
print(tpooled_ptests)
raise ValueError('[parent] tests should be non-negative '
'with no np.nan')
    if max_borrow_obs > min_obs:
        raise ValueError('The maximum number of observations that can be '
                         'borrowed in geographical pooling should not exceed '
                         'the minimum number of observations required to '
                         'compute a ratio')
# STEP 1: "TOP UP" USING PARENT LOCATION
# Number of observations we need to borrow to "top up"
borrow_tests = np.maximum(
np.minimum(min_obs - tpooled_tests, max_borrow_obs), 0)
# There are many cases (a, b > 0):
# Case 1: a / b => no problem
# Case 2: a / 0 => np.inf => borrow_prop becomes 1
# Case 3: 0 / a => no problem
    # Case 4: 0 / 0 => np.nan => 0; this can happen when a
    # region has enough observations but its parent has nothing.
# We ignore RuntimeWarnings and handle them ourselves.
# Reference: https://stackoverflow.com/a/29950752
with np.errstate(divide='ignore', invalid='ignore'):
borrow_prop = borrow_tests / tpooled_ptests
# If there's nothing to borrow, then ya can't borrow
borrow_prop[np.isnan(borrow_prop)] = 0
# Can't borrow more than total no. observations.
# Relies on the fact that np.inf > 1
borrow_prop[borrow_prop > 1] = 1
return borrow_prop
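# Illustrative sketch (not in the original source): a location with 40 temporally
# pooled tests, min_obs=50 and a borrowing cap of 20 borrows 10 observations, i.e. a
# fraction 0.1 of its parent's 100 pooled tests; a location that already meets
# min_obs borrows nothing.
def _geographical_pooling_example():
    tpooled_tests = np.array([40., 60.])
    tpooled_ptests = np.array([100., 100.])
    # returns array([0.1, 0.0])
    return _geographical_pooling(tpooled_tests, tpooled_ptests,
                                 min_obs=50, max_borrow_obs=20)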
def raw_positive_prop(positives, tests, min_obs):
"""
Calculate proportion of positive tests for a single location with no temporal smoothing.
If on any day t, tests[t] < min_obs, then we report np.nan.
The second and third returned np.ndarray are the standard errors,
calculated using the binomial proportion variance _prop_var(); and
the sample size.
Args:
positives: np.ndarray[float]
Number of positive tests, ordered in time, where each array element
represents a subsequent day. If there were no positive tests or
there were no tests performed, this should be zero (never np.nan).
tests: np.ndarray[float]
Number of tests performed. If there were no tests performed, this
should be zero (never np.nan). We should always have
positive[t] <= tests[t] for all t.
min_obs: int
Minimum number of observations in order to compute a proportion.
Returns:
np.ndarray
Proportion of positive tests on each day, with the same length
as positives and tests.
np.ndarray
Standard errors, calculated using the usual binomial variance.
Of the same length as above.
np.ndarray
Sample size used to compute estimates.
"""
positives = positives.astype(float)
tests = tests.astype(float)
if np.any(np.isnan(positives)) or np.any(np.isnan(tests)):
print(positives, tests)
raise ValueError('positives and tests should be non-negative '
'with no np.nan')
if np.any(positives > tests):
raise ValueError('positives should not exceed tests')
if min_obs <= 0:
raise ValueError('min_obs should be positive')
# nan out any days where there are insufficient observations
# this also elegantly sidesteps 0/0 division.
tests[tests < min_obs] = np.nan
positive_prop = positives / tests
se = np.sqrt(_prop_var(positive_prop, tests))
sample_size = tests
return positive_prop, se, sample_size
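# Illustrative sketch (not in the original source): a day with fewer than min_obs
# tests is reported as np.nan instead of an unreliable proportion.
def _raw_positive_prop_example():
    positives = np.array([5., 2., 0.])
    tests = np.array([50., 4., 20.])
    # day 0 -> 0.1, day 1 -> np.nan (only 4 tests), day 2 -> 0.0
    return raw_positive_prop(positives, tests, min_obs=10)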
def smoothed_positive_prop(positives, tests, min_obs, max_borrow_obs, pool_days,
parent_positives=None, parent_tests=None):
"""
Calculate the proportion of positive tests for a single location with temporal smoothing.
For a given day t, if sum(tests[(t-pool_days+1):(t+1)]) < min_obs, then we
'borrow' min_obs - sum(tests[(t-pool_days+1):(t+1)]) observations from the
parents over the same timespan. Importantly, it will make sure NOT to
borrow observations that are _already_ in the current geographic partition
being considered.
If min_obs is specified but not satisfied over the pool_days, and
parent arrays are not provided, then we report np.nan.
The second and third returned np.ndarray are the standard errors,
calculated using the binomial proportion variance _prop_var(); and
the reported sample_size.
Args:
positives: np.ndarray[float]
Number of positive tests, ordered in time, where each array element
represents a subsequent day. If there were no positive tests or
there were no tests performed, this should be zero (never np.nan).
tests: np.ndarray[float]
Number of tests performed. If there were no tests performed, this
should be zero (never np.nan). We should always have
positives[t] <= tests[t] for all t.
min_obs: int
Minimum number of observations in order to compute a proportion.
max_borrow_obs: int
Maximum number of observations that can be borrowed in geographical pooling
pool_days: int
Number of days in the past (including today) over which to pool data.
parent_positives: np.ndarray
Like positives, but for the parent geographic partition (e.g., State)
If this is None, then this shall have 0 positives uniformly.
parent_tests: np.ndarray
Like tests, but for the parent geographic partition (e.g., State)
If this is None, then this shall have 0 tests uniformly.
Returns:
np.ndarray
Proportion of positive tests after the pool_days pooling, with the same
length as positives and tests.
np.ndarray
Standard errors, calculated using the usual binomial variance.
Of the same length as above.
np.ndarray
Effective sample size (after temporal and geographic pooling).
"""
positives = positives.astype(float)
tests = tests.astype(float)
if (parent_positives is None) or (parent_tests is None):
has_parent = False
else:
has_parent = True
parent_positives = parent_positives.astype(float)
parent_tests = parent_tests.astype(float)
if np.any(np.isnan(positives)) or np.any(np.isnan(tests)):
raise ValueError('positives and tests '
'should be non-negative with no np.nan')
if np.any(positives > tests):
raise ValueError('positives should not exceed tests')
if has_parent:
if np.any(np.isnan(parent_positives)) or np.any(np.isnan(parent_tests)):
raise ValueError('parent positives and parent tests '
'should be non-negative with no np.nan')
if np.any(parent_positives > parent_tests):
raise ValueError('positives should not exceed tests')
if min_obs <= 0:
raise ValueError('min_obs should be positive')
if (pool_days <= 0) or not isinstance(pool_days, int):
raise ValueError('pool_days should be a positive int')
# STEP 0: DO THE TEMPORAL POOLING
tpooled_positives = _slide_window_sum(positives, pool_days)
tpooled_tests = _slide_window_sum(tests, pool_days)
if has_parent:
tpooled_ppositives = _slide_window_sum(parent_positives, pool_days)
tpooled_ptests = _slide_window_sum(parent_tests, pool_days)
borrow_prop = _geographical_pooling(tpooled_tests, tpooled_ptests,
min_obs, max_borrow_obs)
pooled_positives = (tpooled_positives
+ borrow_prop * tpooled_ppositives)
pooled_tests = (tpooled_tests
+ borrow_prop * tpooled_ptests)
else:
pooled_positives = tpooled_positives
pooled_tests = tpooled_tests
    # STEP 2: CALCULATE AS THOUGH THEY'RE RAW
return raw_positive_prop(pooled_positives, pooled_tests, min_obs)
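# Illustrative sketch (not in the original source): with a 7-day pooling window the
# county below has only 35 tests per full window, so 14 observations are borrowed
# from the parent geography before the proportion is computed; days before the first
# full window remain np.nan.
def _smoothed_positive_prop_example():
    n_days = 14
    positives = np.full(n_days, 1.)
    tests = np.full(n_days, 5.)
    parent_positives = np.full(n_days, 2.)
    parent_tests = np.full(n_days, 8.)
    # once the window is full, the proportion is (7 + 0.25 * 14) / 49, roughly 0.21
    return smoothed_positive_prop(positives, tests,
                                  min_obs=49, max_borrow_obs=14, pool_days=7,
                                  parent_positives=parent_positives,
                                  parent_tests=parent_tests)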
def raw_tests_per_device(devices, tests, min_obs):
"""
Calculate the tests per device for a single geographic location, without any temporal smoothing.
If on any day t, tests[t] < min_obs, then we report np.nan.
The second and third returned np.ndarray are the standard errors and the sample size.
determined from the relation
`counts_total = counts_signal + counts_background`
Note that if `background_variance=0`, it makes more sense to use
`GammaUpperLimit`, which is equivalent but analytical rather than
numerical.
"""
self.limit = limit
self.confidence_level = confidence_level
_d_unscaled = GeneralGammaDistributionPositive(
scale_factor=1,
counts_total=counts_total,
counts_background=counts_background,
counts_signal=counts_signal,
background_variance=background_variance)
limit_unscaled = _d_unscaled.ppf(self.confidence_level)
# use the value of the limit to determine the scale factor
scale_factor = self.limit / limit_unscaled
super().__init__(
scale_factor=scale_factor,
counts_total=counts_total,
counts_background=counts_background,
counts_signal=counts_signal,
background_variance=background_variance)
def __repr__(self):
return ('flavio.statistics.probability.GeneralGammaUpperLimit'
'({}, {}, counts_total={}, counts_signal={}, '
'background_variance={})').format(self.limit,
self.confidence_level,
self.counts_total,
self.counts_signal,
self.background_variance)
class KernelDensityEstimate(NumericalDistribution):
"""Univariate kernel density estimate.
Parameters:
- `data`: 1D array
- `kernel`: instance of `ProbabilityDistribution` used as smoothing kernel
- `n_bins` (optional): number of bins used in the intermediate step. This normally
does not have to be changed.
"""
def __init__(self, data, kernel, n_bins=None):
self.data = data
assert kernel.central_value == 0, "Kernel density must have zero central value"
self.kernel = kernel
self.n = len(data)
if n_bins is None:
self.n_bins = min(1000, self.n)
else:
self.n_bins = n_bins
y, x_edges = np.histogram(data, bins=self.n_bins, density=True)
x = (x_edges[:-1] + x_edges[1:])/2.
self.y_raw = y
self.raw_dist = NumericalDistribution(x, y)
cdist = convolve_distributions([self.raw_dist, self.kernel], 'sum')
super().__init__(cdist.x, cdist.y)
def __repr__(self):
return 'flavio.statistics.probability.KernelDensityEstimate' + \
'({}, {}, {})'.format(self.data, repr(self.kernel), self.n_bins)
class GaussianKDE(KernelDensityEstimate):
"""Univariate Gaussian kernel density estimate.
Parameters:
- `data`: 1D array
- `bandwidth` (optional): standard deviation of the Gaussian smoothing kernel.
If not provided, Scott's rule is used to estimate it.
- `n_bins` (optional): number of bins used in the intermediate step. This normally
does not have to be changed.
"""
def __init__(self, data, bandwidth=None, n_bins=None):
if bandwidth is None:
self.bandwidth = len(data)**(-1/5.) * np.std(data)
else:
self.bandwidth = bandwidth
super().__init__(data=data,
kernel = NormalDistribution(0, self.bandwidth),
n_bins=n_bins)
def __repr__(self):
return 'flavio.statistics.probability.GaussianKDE' + \
'({}, {}, {})'.format(self.data, self.bandwidth, self.n_bins)
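# Illustrative sketch (an assumption about typical usage, not from the original
# source): smooth a set of pseudo-measurements into a univariate PDF; the attribute
# and method names follow the ProbabilityDistribution interface described elsewhere
# in this module.
def _gaussian_kde_example():
    data = np.random.normal(loc=1.0, scale=0.5, size=1000)
    kde = GaussianKDE(data)  # bandwidth chosen by Scott's rule
    return kde.central_value, kde.get_random(size=5)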
class MultivariateNormalDistribution(ProbabilityDistribution):
"""A multivariate normal distribution.
Parameters:
- central_value: the location vector
- covariance: the covariance matrix
- standard_deviation: the square root of the variance vector
- correlation: the correlation matrix
If the covariance matrix is not specified, standard_deviation and the
correlation matrix have to be specified.
Methods:
- get_random(size=None): get `size` random numbers (default: a single one)
- logpdf(x, exclude=None): get the logarithm of the probability density
function. If an iterable of integers is given for `exclude`, the parameters
at these positions will be removed from the covariance before evaluating
the PDF, effectively ignoring certain dimensions.
Properties:
- error_left, error_right: both return the vector of standard deviations
"""
def __init__(self, central_value, covariance=None,
standard_deviation=None, correlation=None):
"""Initialize PDF instance.
Parameters:
- central_value: vector of means, shape (n)
- covariance: covariance matrix, shape (n,n)
"""
if covariance is not None:
self.covariance = covariance
self.standard_deviation = np.sqrt(np.diag(self.covariance))
self.correlation = self.covariance/np.outer(self.standard_deviation,
self.standard_deviation)
np.fill_diagonal(self.correlation, 1.)
else:
if standard_deviation is None:
raise ValueError("You must specify either covariance or standard_deviation")
self.standard_deviation = np.array(standard_deviation)
if correlation is None:
self.correlation = np.eye(len(self.standard_deviation))
else:
if isinstance(correlation, (int, float)):
# if it's a number, return delta_ij + (1-delta_ij)*x
n_dim = len(central_value)
self.correlation = np.eye(n_dim) + (np.ones((n_dim, n_dim))-np.eye(n_dim))*float(correlation)
else:
self.correlation = np.array(correlation)
self.covariance = np.outer(self.standard_deviation,
self.standard_deviation)*self.correlation
super().__init__(central_value, support=np.array([
np.asarray(central_value) - 6*self.standard_deviation,
np.asarray(central_value) + 6*self.standard_deviation
]))
# to avoid ill-conditioned covariance matrices, all data are rescaled
# by the inverse variances
self.err = np.sqrt(np.diag(self.covariance))
self.scaled_covariance = self.covariance / np.outer(self.err, self.err)
assert np.all(np.linalg.eigvals(self.scaled_covariance) >
0), "The covariance matrix is not positive definite!" + str(covariance)
def __repr__(self):
return 'flavio.statistics.probability.MultivariateNormalDistribution' + \
'({}, {})'.format(self.central_value, self.covariance)
def get_random(self, size=None):
"""Get `size` random numbers (default: a single one)"""
return np.random.multivariate_normal(self.central_value, self.covariance, size)
def reduce_dimension(self, exclude=None):
"""Return a different instance where certain dimensions, specified by
the iterable of integers `exclude`, are removed from the covariance.
If `exclude` contains all indices but one, an instance of
`NormalDistribution` will be returned.
"""
if not exclude:
return self
# if parameters are to be excluded, construct a
# distribution with reduced mean vector and covariance matrix
_cent_ex = np.delete(self.central_value, exclude)
_cov_ex = np.delete(
np.delete(self.covariance, exclude, axis=0), exclude, axis=1)
if len(_cent_ex) == 1:
# if only 1 dimension remains, can use a univariate Gaussian
_dist_ex = NormalDistribution(
central_value=_cent_ex[0], standard_deviation=np.sqrt(_cov_ex[0, 0]))
else:
# if more than 1 dimension remains, use a (smaller)
# multivariate Gaussian
_dist_ex = MultivariateNormalDistribution(
central_value=_cent_ex, covariance=_cov_ex)
return _dist_ex
def logpdf(self, x, exclude=None):
"""Get the logarithm of the probability density function.
Parameters:
- x: vector; position at which PDF should be evaluated
- exclude: optional; if an iterable of integers is given, the parameters
at these positions will be removed from the covariance before
evaluating the PDF, effectively ignoring certain dimensions.
"""
if exclude is not None:
# if parameters are to be excluded, construct a temporary
# distribution with reduced mean vector and covariance matrix
# and call its logpdf method
_dist_ex = self.reduce_dimension(exclude=exclude)
return _dist_ex.logpdf(x)
        # undoing the rescaling of the covariance
        pdf_scaled = scipy.stats.multivariate_normal.logpdf(
            x / self.err, self.central_value / self.err, self.scaled_covariance)
        # correct the normalization for the rescaling via the log-determinants
        logdet_scaled = np.linalg.slogdet(self.scaled_covariance)[1]
        logdet = np.linalg.slogdet(self.covariance)[1]
        return pdf_scaled + (logdet_scaled - logdet) / 2.
def get_error_left(self, nsigma=1):
"""Return the lower errors"""
return nsigma * self.err
def get_error_right(self, nsigma=1):
"""Return the upper errors"""
return nsigma * self.err
def get_cov_mat(self):
"""Return the covariance matrix"""
return self.covariance
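# Illustrative sketch (not part of the original module): a two-dimensional correlated
# Gaussian built from standard deviations and a single correlation coefficient, as
# supported by the constructor above.
def _multivariate_normal_example():
    dist = MultivariateNormalDistribution(central_value=[1.0, 2.0],
                                          standard_deviation=[0.1, 0.3],
                                          correlation=0.5)
    # log PDF at the central value, and the 1D marginal with dimension 1 removed
    return dist.logpdf([1.0, 2.0]), dist.reduce_dimension(exclude=(1,))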
class MultivariateNumericalDistribution(ProbabilityDistribution):
"""A multivariate distribution with PDF specified numerically."""
def __init__(self, xi, y, central_value=None):
"""Initialize a multivariate numerical distribution.
Parameters:
- `xi`: for an N-dimensional distribution, a list of N 1D arrays
specifiying the grid in N dimensions. The 1D arrays must contain
real, evenly spaced values in strictly ascending order (but the
spacing can be different for different dimensions). Any of the 1D
arrays can also be given alternatively as a list of two numbers, which
will be assumed to be the upper and lower boundaries, while the
spacing will be determined from the shape of `y`.
- `y`: PDF values on the grid defined by the `xi`. If the N `xi` have
length M1, ..., MN, `y` has dimension (M1, ..., MN). This is the same
shape as the grid obtained from `numpy.meshgrid(*xi, indexing='ij')`.
- central_value: if None (default), will be set to the mode of the
distribution, i.e. the N-dimensional xi-vector where y is largest
(by looking up the input arrays, i.e. without interpolation!)
"""
for x in xi:
# check that grid spacings are even up to per mille precision
d = np.diff(x)
if abs(np.min(d)/np.max(d)-1) > 1e-3:
raise ValueError("Grid must be evenly spaced per dimension")
self.xi = [np.asarray(x) for x in xi]
self.y = np.asarray(y)
for i, x in enumerate(xi):
if len(x) == 2:
self.xi[i] = np.linspace(x[0], x[1], self.y.shape[i])
if central_value is not None:
super().__init__(central_value=central_value,
support=(np.asarray(self.xi).T[0], np.asarray(self.xi).T[-1]))
else:
# if no central value is specified, set it to the mode
mode_index = (slice(None),) + np.unravel_index(self.y.argmax(), self.y.shape)
mode = np.asarray(np.meshgrid(*self.xi, indexing='ij'))[mode_index]
super().__init__(central_value=mode, support=None)
_bin_volume = np.prod([x[1] - x[0] for x in self.xi])
self.y_norm = self.y / np.sum(self.y) / _bin_volume # normalize PDF to 1
# ignore warning from log(0)=-np.inf
with np.errstate(divide='ignore', invalid='ignore'):
# logy = np.nan_to_num(np.log(self.y_norm))
logy = np.log(self.y_norm)
logy[np.isneginf(logy)] = -1e100
self.logpdf_interp = RegularGridInterpolator(self.xi, logy,
fill_value=-np.inf, bounds_error=False)
# the following is needed for get_random: initialize to None
self._y_flat = None
self._cdf_flat = None
def __repr__(self):
return 'flavio.statistics.probability.MultivariateNumericalDistribution' + \
'({}, {}, {})'.format([x.tolist() for x in self.xi], self.y.tolist(), list(self.central_value))
def get_random(self, size=None):
"""Draw a random number from the distribution.
If size is not None but an integer N, return an array of N numbers.
For the MultivariateNumericalDistribution, the PDF from which the
random numbers are drawn is approximated to be piecewise constant in
hypercubes around the points of the lattice spanned by the `xi`. A finer
lattice spacing will lead to a smoother distribution of random numbers
(but will also be slower).
"""
if size is None:
return self._get_random()
else:
return np.array([self._get_random() for i in range(size)])
def _get_random(self):
# if these have not been initialized, do it (once)
if self._y_flat is None:
# get a flattened array of the PDF
self._y_flat = self.y.flatten()
if self._cdf_flat is None:
# get the (discrete) 1D CDF
_cdf_flat = np.cumsum(self._y_flat)
# normalize to 1
self._cdf_flat = _cdf_flat/_cdf_flat[-1]
# !/usr/bin/env python
# -*- coding:utf-8 _*-
# @Author: swang
# @Contact: <EMAIL>
# @Project Name: keyword_spotting_system
# @File: test.py
# @Time: 2021/11/11/21:51
# @Software: PyCharm
import os, sys
CRT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CRT_DIR)
# print('sys.path:', sys.path)
import time
import json
import numpy as np
import threading
import multiprocessing
from pyaudio import PyAudio
from collections import deque # , BlockingQueue
from queue import Queue
from SoundSourceLocalization.SSL_Settings import *
from SoundSourceLocalization.mylib.utils import standard_normalizaion
# from SoundSourceLocalization.lib.audiolib import normalize_single_channel_audio, audio_segmenter_4_numpy, \
# audio_energy_ratio_over_threshold, audio_energy_over_threshold, audiowrite, audioread
import tensorflow.compat.v1 as tf
from kws_streaming.layers import modes
from kws_streaming.models import models
from kws_streaming.models import utils
from kws_streaming.train import inference
from scipy.special import softmax as scipy_softmax
WORD_QUEUE_MAX_LENGTH = None
WORD_QUEUE = deque()  # the max length is set once KWS determines the detection stride
WORD_QUEUE_UPDATA = False
AUDIO_QUEUE = Queue()  # (maxsize=3 * self.clip_duration_ms) # TODO: unbounded queue, risk of memory overflow
SSL_AUDIO = []
SSL_AUDIO_UPDATE = False
class VoiceMenu(object):
def __init__(self):
# print('-' * 20, 'init VoiceMenu class', '-' * 20)
super(VoiceMenu, self).__init__()
self.keyword_ls = ['walker', 'voice', 'menu', 'redraw', 'the', 'map', 'charge', 'start', 'sleep',
'off', 'hand', 'operation', 'yes', 'no', ]
self.walker_name = 'walker'
self.menu_name = 'voice menu'
self.command_ls = ['voice menu', 'redraw map', 'charge', 'start',
'sleep', 'voice menu off', 'hand operation', 'yes', 'no', ]
self.affirm_ls = ['yes', 'no']
        self.wait_action_ls = ['redraw map', 'charge', ]  # 'voice menu' not included
        self.instant_action_ls = ['start', 'sleep', 'voice menu off', 'hand operation', ]  # 'voice menu' not included
self.excluded_ls = ['silence', 'unknown', ]
self.action_ls = self.wait_action_ls + self.instant_action_ls
self.wait_time = 10 # s 'inf' for waiting forever
self.last_command = None
self.last_command_time = None
global MAX_COMMAND_SECONDS
self.command_interval = MAX_COMMAND_SECONDS + 0.1 # s
def detect_command(self, command_ls, wait_time):
# menu_name is detected by default, even if it isn't in command_ls
global WORD_QUEUE, WORD_QUEUE_UPDATA, WORD_QUEUE_MAX_LENGTH
# command_ls = command_ls if ('voice menu off' in command_ls) else command_ls + ['voice menu off']
start_time = time.time()
returnCommand = False
        waiting_off = False  # used to distinguish 'voice menu' from 'voice menu off'
while True:
if (wait_time != 'inf') and (time.time() - start_time > wait_time):
return None
if not (WORD_QUEUE_UPDATA and (len(WORD_QUEUE) == WORD_QUEUE_MAX_LENGTH)):
time.sleep(0.1)
WORD_QUEUE_UPDATA = False
word_ls = list(WORD_QUEUE)
            # build a command from the detected words
command = self.convert_word_to_command(word_ls, command_ls)
            # handle repeated commands
if (command is not None) and (command != self.menu_name):
if command != self.last_command:
returnCommand = True
else:
                    assert self.last_command_time is not None, 'last_command_time should have been set together with last_command'
if time.time() - self.last_command_time > self.command_interval:
returnCommand = True
else:
continue
elif command == self.menu_name:
waiting_off = True
elif command is None:
                if waiting_off:  # 'off' never arrived and the utterance has ended
returnCommand = True
command = self.menu_name
else:
continue
else:
print('Warning: Bad case exists!')
if returnCommand:
self.last_command = command
self.last_command_time = time.time()
WORD_QUEUE.clear()
print('command:', command)
return command
    def convert_word_to_command(self, word_ls, command_ls):  # TODO: optimize, e.g. by taking word order into account
# word_ls = [('voice', 0.9971545), ('voice', 0.99989796), ('voice', 0.99968916), ('voice', 0.983763),
# ('menu', 0.86595213), ('menu', 0.9521046), ('menu', 0.82160306)]
word_ls = [i for i in word_ls if ((i[0] not in self.excluded_ls) and (i[1] > 0.7))]
if not len(word_ls):
return None
words, probs = list(zip(*word_ls))
words, probs = np.asarray(words), np.asarray(probs)
uni_words = np.unique(words)
uni_probs = []
for wd in uni_words:
uni_probs.append(probs[words == wd].mean())
uni_probs = np.asarray(uni_probs)
candi_cmd_ls = []
for cmd in command_ls:
cmd_set = set(cmd.split(' '))
            if cmd_set.issubset(uni_words):  # word order is not considered
candi_cmd_ls.append(cmd)
if ('voice menu off' in candi_cmd_ls) and ('voice menu' in candi_cmd_ls):
candi_cmd_ls.remove('voice menu')
if len(candi_cmd_ls) == 0:
return None
elif len(candi_cmd_ls) == 1:
return candi_cmd_ls[0]
else:
            # pick one command out of the multiple candidates
cmd_prob_ls = []
for cmd in candi_cmd_ls:
wds = cmd.split(' ')
cmd_prob = [uni_probs[uni_words == wd] for wd in wds]
cmd_prob_ls.append(np.mean(cmd_prob))
return candi_cmd_ls[np.argmax(cmd_prob_ls)]
def broadcast(self, command, level=None):
'''
Args:
command:
level: level of broadcasting
1: Sure to ... ;
2: Will ... automatically in half a minute. Say "No" or press the emergency button to cancel;
3: Complete ...
'''
if command == None:
print('Broadcast: Time out. And exit the voice menu automatically.')
# elif command == self.walker_name:
# print(f'Broadcast: walker_name (\'{self.walker_name}\') is detected.')
elif command == self.menu_name:
print('Broadcast: Voice menu started.')
elif command in self.wait_action_ls:
if level == 1:
print(f'Broadcast: Sure to {command} ?')
elif level == 2:
print(
f'Broadcast: Will {command} automatically in half a minute. \n\t\t\tSay "No" or press the emergency button to cancel?')
elif level == 3:
print(f'Broadcast: {command} was completed')
else:
print(f'Warning: Level ({level}) is illegal!')
elif command in self.instant_action_ls:
if level == 1:
print(f'Broadcast: Sure to {command} ?')
elif level == 2:
print(f'Broadcast: {command} was completed')
else:
print(f'Warning: Level ({level}) is illegal!')
else:
print('-' * 20, f'Warning: Unknow command -> {command}!', '-' * 20)
def run(self):
# streaming KWS model detects keywords all the time
while True:
time.sleep(0.1)
name = self.detect_command([self.menu_name, ], 'inf')
if name != self.menu_name:
# print(f'Warning: Will skip \'{name}\' while waiting for menu_name({self.menu_name})')
continue
while True: # voice menu started
self.broadcast(self.menu_name, )
action = self.detect_command(self.action_ls + [self.menu_name], self.wait_time)
                if action == None:  # timed out, go back to listening for the voice menu
self.broadcast(action, )
break
elif action == self.menu_name:
continue
elif action in self.instant_action_ls:
self.broadcast(action, level=1)
affirm = self.detect_command(self.affirm_ls + [self.menu_name], self.wait_time)
if affirm == 'yes':
self.broadcast(action, level=2)
return action
elif affirm in ['no', self.menu_name, None]:
continue
else:
                            print(f'Warning: Error detection -> \'{affirm}\' \
while detecting affirm({self.affirm_ls + [self.menu_name]})')
elif action in self.wait_action_ls:
self.broadcast(action, level=1)
affirm = self.detect_command(self.affirm_ls + [self.menu_name], self.wait_time)
if affirm in ['no', self.menu_name, None]:
continue
elif affirm == 'yes':
self.broadcast(action, level=2)
reaffirm = self.detect_command(['no'] + [self.menu_name], self.wait_time)
if reaffirm in ['no', self.menu_name]:
continue
elif reaffirm == None:
self.broadcast(action, level=3)
return action
else:
                            print(f'Warning: Error detection -> \'{reaffirm}\' while detecting reaffirm')
else:
                        print(f'Warning: Error detection -> \'{affirm}\' while detecting affirm({self.affirm_ls})')
else:
                    print(f'Warning: Error detection -> \'{action}\' while detecting action({self.action_ls})')
def run_forever(self, ):
while True:
self.run()
class KeyWordSpotting(object):
def __init__(self, use_stream=False):
# print('-' * 20, 'init KeyWordSpotting class', '-' * 20)
super(KeyWordSpotting, self).__init__()
self.use_stream = use_stream
        assert self.use_stream == False  # the streaming model is not considered for now
self.model_name = 'ds_tc_resnet_cpu_causal_20211231-200734'
self.model_dir = os.path.abspath(os.path.join(CRT_DIR, '../model', self.model_name, ))
self.flags_path = os.path.join(self.model_dir, 'flags.json')
self.flags = self.__load__flags__()
self.flags.batch_size = 1
print('-' * 20, 'Loading KWS non_stream_model...', '-' * 20, )
self.non_stream_model = self.__load_non_stream_model__(weights_name='last_weights')
        if self.use_stream:  # TODO: save the streaming model and load it directly instead of converting it every time, which is slow
self.stream_model = self.__convert_2_stream_model__()
self.labels = np.array(['silence', 'unknown', ] + self.flags.wanted_words.split(','))
self.walker_name = self.labels[2]
print('-' * 20, 'KWS labels:', ' '.join(self.labels), '-' * 20)
print('-' * 20, 'KWS walker_name:', self.walker_name, '-' * 20)
self.clip_duration_ms = int(self.flags.clip_duration_ms)
assert self.clip_duration_ms == int(CLIP_MS)
if self.use_stream:
self.window_stride_ms = int(self.flags.window_stride_ms)
else:
self.window_stride_ms = WINDOW_STRIDE_MS
global WORD_QUEUE, WORD_QUEUE_MAX_LENGTH
WORD_QUEUE_MAX_LENGTH = MAX_COMMAND_SECONDS * 1000 // self.window_stride_ms
WORD_QUEUE = deque(maxlen=WORD_QUEUE_MAX_LENGTH)
def __load__flags__(self, ):
with open(self.flags_path, 'r') as load_f:
flags_json = json.load(load_f)
class DictStruct(object):
def __init__(self, **entries):
self.__dict__.update(entries)
self.flags = DictStruct(**flags_json)
return self.flags
def __load_non_stream_model__(self, weights_name):
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# self.audio_processor = input_data.AudioProcessor(self.flags)
tf.keras.backend.set_learning_phase(0)
# tf.disable_eager_execution()
# print('tf.keras.backend.image_data_format():', tf.keras.backend.image_data_format())
tf.keras.backend.set_image_data_format('channels_last')
non_stream_model = models.MODELS[self.flags.model_name](self.flags)
weight_path = os.path.join(self.model_dir, weights_name, )
non_stream_model.load_weights(weight_path).expect_partial()
# non_stream_model.summary()
# tf.keras.utils.plot_model(
# non_stream_model,
# show_shapes=True,
# show_layer_names=True,
# expand_nested=True,
# to_file=os.path.join('./', self.model_name + '_non_stream.png'), )
#
return non_stream_model
def __convert_2_stream_model__(self, ):
print('tf stream model state internal without state resetting between testing sequences')
self.flags.data_shape = modes.get_input_data_shape(self.flags, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
stream_model = utils.to_streaming_inference(
self.non_stream_model, self.flags, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
# stream_model.summary()
# tf.keras.utils.plot_model(
# stream_model,
# show_shapes=True,
# show_layer_names=True,
# expand_nested=True,
# to_file=os.path.join('./', self.model_name + '_stream.png'),
# )
return stream_model
def predict(self, x, use_stream=None):
use_stream = self.use_stream if (use_stream is None) else use_stream
if use_stream:
y_pred = inference.run_stream_inference_classification(self.flags, self.stream_model, x)
else:
y_pred = self.non_stream_model.predict(x)
y_pred = scipy_softmax(y_pred, axis=-1)
label = np.argmax(y_pred, axis=-1)
return label, y_pred[:, label].squeeze(axis=0)
def run(self, ):
global WORD_QUEUE, WORD_QUEUE_UPDATA, AUDIO_QUEUE, SSL_AUDIO, SSL_AUDIO_UPDATE
if not self.use_stream:
local_audio_frames = deque(maxlen=self.clip_duration_ms)
while True:
for i in range(self.window_stride_ms):
local_audio_frames.append(AUDIO_QUEUE.get(block=True, timeout=None))
if len(local_audio_frames) != self.clip_duration_ms:
continue
################################ predict ########################################
audio = np.concatenate(local_audio_frames, axis=1)
# audio = normalize_single_channel_to_target_level(audio, )
x = np.array(audio[0], dtype=np.float64)[np.newaxis, :]
y, prob = self.predict(x, use_stream=self.use_stream)
y, prob = self.labels[y[0]], prob[0]
WORD_QUEUE.append((y, prob))
WORD_QUEUE_UPDATA = True
# if (y not in ['silence', 'unknown', ]) and prob > 0.70:
# # print('y & prob:', y, round(prob, 3), end='\t')
# print(y, round(prob, 3), end='\t')
                if y == self.walker_name:  # walker_name detected, pass the audio to the sound source localization module
print(f'KWS: walker_name (\'{self.walker_name}\') is detected.')
                    SSL_AUDIO = (audio, y, prob)  # (audio, text, probability)
                    SSL_AUDIO_UPDATE = True
        compiled: types.CodeType = compile(code, f.__code__.co_filename, "exec")
for const in compiled.co_consts:
if (
isinstance(const, types.CodeType)
and const.co_name == f.__code__.co_name
):
f.__code__ = const
break
@functools.wraps(f)
def instrumented_f(*args, **kwargs):
with self.tracing_enabled(tracing_enabled_file=f_defined_file):
return f(*args, **kwargs)
return instrumented_f
def __call__(self, code: Union[str, ast.Module, ast.stmt, Callable]):
if isinstance(code, (str, ast.AST)):
return self.exec(code, num_extra_lookback_frames=1)
else:
return self.instrumented(code)
def __getitem__(self, code: Union[str, ast.Module, ast.stmt, Callable]):
return self(code)
def enter_tracing_hook(self) -> None:
pass
def exit_tracing_hook(self) -> None:
pass
def _static_init_module_impl(self, node: ast.Module) -> None:
self.current_module[0] = node
self.static_init_module(node)
def static_init_module(self, node: ast.Module) -> None:
pass
def _make_tracing_context_cleanup_callback(self):
orig_num_sandbox_calls_seen = self._num_sandbox_calls_seen
orig_hard_disabled = self._is_tracing_hard_disabled
orig_exec_saved_thunk = getattr(builtins, EXEC_SAVED_THUNK, None)
orig_sandbox_fname = self._current_sandbox_fname
orig_tracing_enabled_files = self._tracing_enabled_files
def cleanup(should_push: bool, will_enable_tracing: bool) -> None:
self._tracing_enabled_files = orig_tracing_enabled_files
self._current_sandbox_fname = orig_sandbox_fname
self._is_tracing_hard_disabled = orig_hard_disabled
self._num_sandbox_calls_seen = orig_num_sandbox_calls_seen
if should_push:
del _TRACER_STACK[-1]
if will_enable_tracing:
self._disable_tracing(check_enabled=False)
if should_push:
self.exit_tracing_hook()
if len(_TRACER_STACK) == 0:
for extra_builtin in {
EMIT_EVENT,
EXEC_SAVED_THUNK,
TRACE_LAMBDA,
TRACING_ENABLED,
} | self.guards:
if hasattr(builtins, extra_builtin):
delattr(builtins, extra_builtin)
elif orig_exec_saved_thunk is not None:
setattr(builtins, EXEC_SAVED_THUNK, orig_exec_saved_thunk)
return cleanup
@contextmanager
def tracing_context(
self, disabled: bool = False, tracing_enabled_file: Optional[str] = None
) -> Generator[None, None, None]:
cleanup_callback = None
try:
cleanup_callback = self.tracing_non_context(
disabled=disabled, tracing_enabled_file=tracing_enabled_file
)
yield
finally:
if cleanup_callback is not None:
cleanup_callback()
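# Hedged usage sketch (an assumption, mirroring how exec_raw/eval/exec below use it):
#   with tracer.tracing_context(tracing_enabled_file="/path/to/sandbox.py"):
#       ...  # code executed here runs with this tracer active
# On exit, the cleanup callback restores the previous tracer stack, builtins, and
# tracing state; tracing_non_context() gives the same behaviour without a `with`
# block by returning that cleanup callback for the caller to invoke.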
def tracing_non_context(
self, disabled: bool = False, tracing_enabled_file: Optional[str] = None
) -> Callable:
cleanup_callback_impl = self._make_tracing_context_cleanup_callback()
do_patch_meta_path = False
should_push = self not in _TRACER_STACK
self._is_tracing_hard_disabled = disabled
will_enable_tracing = (
not self._is_tracing_hard_disabled and not self._is_tracing_enabled
)
def first_cleanup_callback():
return cleanup_callback_impl(should_push, will_enable_tracing)
all_cleanup_callbacks = [first_cleanup_callback]
def cleanup_callback():
for cleanup in reversed(all_cleanup_callbacks):
cleanup()
if tracing_enabled_file is not None:
self._current_sandbox_fname = tracing_enabled_file
self._tracing_enabled_files = self._tracing_enabled_files | {
tracing_enabled_file
}
if getattr(builtins, EMIT_EVENT, None) is not _emit_event:
setattr(builtins, EMIT_EVENT, _emit_event)
for guard in self.guards:
self.deactivate_guard(guard)
if not hasattr(builtins, TRACING_ENABLED):
setattr(builtins, TRACING_ENABLED, False)
setattr(builtins, EXEC_SAVED_THUNK, self.exec_saved_thunk)
setattr(builtins, TRACE_LAMBDA, self.trace_lambda)
if len(_TRACER_STACK) == 0:
do_patch_meta_path = True
if should_push:
_TRACER_STACK.append(self) # type: ignore
do_patch_sys_settrace = self.has_sys_trace_events and will_enable_tracing
if do_patch_meta_path:
all_cleanup_callbacks.append(patch_meta_path_non_context(_TRACER_STACK))
if do_patch_sys_settrace:
all_cleanup_callbacks.append(self._patch_sys_settrace_non_context())
if will_enable_tracing:
self._enable_tracing()
if should_push:
self.enter_tracing_hook()
return cleanup_callback
def preprocess(self, code: str, rewriter: AstRewriter) -> str:
for augmenter in self.make_syntax_augmenters(rewriter):
code = augmenter(code)
return code
def parse(self, code: str, mode="exec") -> Union[ast.Module, ast.Expression]:
rewriter = self.make_ast_rewriter()
for tracer in _TRACER_STACK:
code = tracer.preprocess(code, rewriter)
return rewriter.visit(ast.parse(code, mode=mode))
def exec_raw(
self,
code: Union[ast.Module, ast.Expression, str],
global_env: dict,
local_env: dict,
filename: str = SANDBOX_FNAME,
instrument: bool = True,
do_eval: bool = False,
) -> Any:
with self.tracing_context(
disabled=self._is_tracing_hard_disabled,
tracing_enabled_file=filename,
) if instrument else suppress():
if isinstance(code, str):
code = textwrap.dedent(code).strip()
code = self.parse(code)
if instrument:
code = self.make_ast_rewriter().visit(code)
code_obj = compile(code, filename, "eval" if do_eval else "exec")
if do_eval:
self._num_sandbox_calls_seen = 2
return eval(code_obj, global_env, local_env)
else:
return exec(code_obj, global_env, local_env)
@staticmethod
def _get_environments(
global_env: Optional[dict],
local_env: Optional[dict],
num_extra_lookback_frames: int,
) -> Tuple[dict, dict]:
frame = None
if global_env is None or local_env is None:
frame = sys._getframe().f_back
for _ in range(num_extra_lookback_frames):
frame = frame.f_back
if global_env is None:
global_env = frame.f_globals
if local_env is None:
local_env = frame.f_locals
return global_env, local_env
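# Hedged note (added for clarity, not original documentation): the frame walk above
# resolves the caller's namespaces when none are supplied explicitly, e.g.
#   frame = sys._getframe().f_back   # frame of the direct caller
#   frame = frame.f_back             # one extra hop per num_extra_lookback_frames
# which is why eval()/exec() below pass num_extra_lookback_frames + 1: they skip
# their own frame and land on the user's calling frame.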
def eval(
self,
code: Union[str, ast.expr, ast.Expression],
global_env: Optional[dict] = None,
local_env: Optional[dict] = None,
*,
instrument: bool = True,
filename: str = SANDBOX_FNAME,
num_extra_lookback_frames: int = 0,
) -> Any:
global_env, local_env = self._get_environments(
global_env, local_env, num_extra_lookback_frames + 1
)
with self.tracing_context(
disabled=self._is_tracing_hard_disabled,
tracing_enabled_file=filename,
) if instrument else suppress():
visited = False
if isinstance(code, str):
if instrument:
visited = True
code = cast(ast.Expression, self.parse(code, mode="eval"))
else:
code = cast(ast.Expression, ast.parse(code, mode="eval"))
if not isinstance(code, ast.Expression):
code = ast.Expression(code)
if instrument and not visited:
code = self.make_ast_rewriter().visit(code)
return self.exec_raw(
code, # type: ignore
global_env=global_env,
local_env=local_env,
filename=filename,
instrument=False,
do_eval=True,
)
def exec(
self,
code: Union[str, ast.Module, ast.stmt],
global_env: Optional[dict] = None,
local_env: Optional[dict] = None,
*,
instrument: bool = True,
filename: str = SANDBOX_FNAME,
num_extra_lookback_frames: int = 0,
) -> Dict[str, Any]:
global_env, local_env = self._get_environments(
global_env, local_env, num_extra_lookback_frames + 1
)
# pytest inserts variables prepended with "@"; we don't want these
args_to_use = [
k for k in local_env.keys() if not k.startswith("@") and k != "__"
]
if len(args_to_use) > 0:
sandbox_args = ", ".join(["*"] + args_to_use + ["**__"])
else:
sandbox_args = "**__"
env_name = "__Xix_pyccolo_local_env"
fun_name = "__Xix_pyccolo_sandbox"
sandboxed_code: Union[ast.Module, str] = textwrap.dedent(
f"""
{env_name} = dict(locals())
def {fun_name}({sandbox_args}):
return locals()
{env_name} = {fun_name}(**{env_name})
{env_name}.pop("__", None)
{env_name}.pop("builtins", None)
"""
).strip()
sandboxed_code = ast.parse(cast(str, sandboxed_code), filename, "exec")
with self.tracing_context(
disabled=self._is_tracing_hard_disabled,
tracing_enabled_file=filename,
) if instrument else suppress():
visited = False
if isinstance(code, str):
code = textwrap.dedent(code).strip()
if instrument:
visited = True
code = cast(ast.Module, self.parse(code))
else:
code = cast(ast.Module, ast.parse(code))
if not isinstance(code, ast.Module):
assert isinstance(code, ast.stmt)
code = ast.Module([code])
if instrument and not visited:
code = self.make_ast_rewriter().visit(code)
# prepend the user code before the "return locals()" statement of the sandbox function
fundef: ast.FunctionDef = cast(ast.FunctionDef, sandboxed_code.body[1])
if isinstance(code, ast.Module):
code_body: List[ast.stmt] = code.body
else:
assert isinstance(code, ast.stmt)
code_body = [code]
fundef.body = code_body + fundef.body
self.exec_raw(
sandboxed_code,
global_env=global_env,
local_env=local_env,
filename=filename,
instrument=False,
)
return local_env.pop(env_name)
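# Hedged illustration (an assumption about the generated source, shown only for
# clarity): for user code `a = 1` with no usable locals, the sandboxed module built
# above is roughly
#   __Xix_pyccolo_local_env = dict(locals())
#   def __Xix_pyccolo_sandbox(**__):
#       a = 1                    # user statements are spliced in here
#       return locals()
#   __Xix_pyccolo_local_env = __Xix_pyccolo_sandbox(**__Xix_pyccolo_local_env)
# so the dict returned by exec() contains the names the user code defined, minus the
# "__" and "builtins" entries popped above.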
def trace_lambda(self, lam):
# for now, this is primarily so that we can distinguish between
# lambdas that we generate vs those that the user generates
code: CodeType = lam.__code__
assert code.co_name == "<lambda>"
if sys.version_info >= (3, 8):
lam.__code__ = code.replace(co_name="<traced_lambda>")
else:
# replace(...) not available for older python but CodeType
# constructor is stable at least
lam.__code__ = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
"<traced_lambda>",
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
return lam
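# Hedged example of the renaming above (Python 3.8+ branch; `tracer` is an assumed
# instance name):
#   lam = lambda x: x + 1
#   lam.__code__.co_name             # '<lambda>'
#   lam = tracer.trace_lambda(lam)
#   lam.__code__.co_name             # '<traced_lambda>'
# The pre-3.8 branch rebuilds the CodeType by hand to achieve the same effect.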
def exec_saved_thunk(self):
assert self._saved_thunk is not None
thunk, self._saved_thunk = self._saved_thunk, None
if thunk is not Pass:
return self.exec(thunk, instrument=False, num_extra_lookback_frames=1)
def execute(self, *args, **kwargs):
return self.exec(*args, **kwargs)
def _should_attempt_to_reenable_tracing(self, frame: FrameType) -> bool:
return NotImplemented
def _sys_tracer(self, frame: FrameType, evt: str, arg: Any, **__):
if not self._file_passes_filter_impl(evt, frame.f_code.co_filename):
return None
if evt == "call" and frame.f_code.co_filename == self.defined_file:
func_name = frame.f_code.co_name
if func_name == self.allow_reentrant_events.__name__:
return None
elif func_name in self._handler_names and not self.allow_reentrant_events():
return None
if self._has_fancy_sys_tracing and evt == "call":
if TraceEvent.line not in self.events_with_registered_handlers:
frame.f_trace_lines = False # type: ignore
if TraceEvent.opcode in self.events_with_registered_handlers:
frame.f_trace_opcodes = True # type: ignore
return self._emit_event(evt, None, frame, ret=arg)
if TYPE_CHECKING:
TracerT = TypeVar("TracerT", bound="_InternalBaseTracer")
@classmethod
def instance(cls: Type[TracerT], *args, **kwargs) -> TracerT:
...
@classmethod
def clear_instance(cls) -> None:
...
def register_handler(
event: Union[
Union[TraceEvent, Type[ast.AST]], Tuple[Union[TraceEvent, Type[ast.AST]], ...]
],
when: Optional[Union[Callable[..., bool], Predicate]] = None,
reentrant: bool = False,
use_raw_node_id: bool = False,
guard: Optional[Callable[[ast.AST], str]] = None,
):
events = event if isinstance(event, tuple) else (event,)
when = Predicate.TRUE if when is None else when
if isinstance(when, Predicate):
pred: Predicate = when
else:
pred = Predicate(when, use_raw_node_id=use_raw_node_id) # type: ignore
pred.use_raw_node_id = use_raw_node_id
if TraceEvent.opcode in events and sys.version_info < (3, 7):
raise ValueError("can't trace opcodes on Python < 3.7")
def _inner_registrar(handler):
for evt in events:
handler_spec = HandlerSpec(handler, use_raw_node_id, reentrant, pred, guard)
_InternalBaseTracer.EVENT_HANDLERS_PENDING_REGISTRATION[
AST_TO_EVENT_MAPPING[evt]
if type(evt) is type and issubclass(evt, ast.AST)
else evt
].append(handler_spec)
_InternalBaseTracer.handler_spec_by_id[id(handler_spec)] = handler_spec
return handler
return _inner_registrar
def __event_call__(self, handler=None, **kwargs):
if handler is None:
def _register_func(_handler):
return register_handler(self, **kwargs)(_handler)
return _register_func
else:
if len(kwargs) > 0:
raise ValueError(
"keyword arguments not supported for simple handler registration"
)
return register_handler(self)(handler)
TraceEvent.__call__ = __event_call__ # type: ignore
def register_raw_handler(
event: Union[
Union[TraceEvent, Type[ast.AST]], Tuple[Union[TraceEvent, Type[ast.AST]], ...]
],
**kwargs,
):
return register_handler(event, use_raw_node_id=True, **kwargs)
def skip_when_tracing_disabled(handler):
@functools.wraps(handler)
def skipping_handler(self, *args, **kwargs):
if not self._is_tracing_enabled:
return
return handler(self, *args, **kwargs)
return skipping_handler
def register_universal_handler(handler):
return register_handler(tuple(evt for evt in TraceEvent))(handler)
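# Hedged usage sketch (an assumption, mirroring the handlers registered on
# BaseTracer just below): handler methods are registered with the decorator
# factories above, e.g.
#   @register_handler(TraceEvent.line, reentrant=True)
#   def my_line_handler(self, *_, **__):
#       ...
# register_raw_handler() is the same thing with use_raw_node_id=True, and
# skip_when_tracing_disabled() wraps a handler so it only fires while tracing
# is enabled.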
class BaseTracer(_InternalBaseTracer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._saved_slice: Optional[Any] = None
@register_raw_handler(
(
TraceEvent.before_subscript_load,
TraceEvent.before_subscript_store,
TraceEvent.before_subscript_del,
),
reentrant=True,
)
def _save_slice_for_later(self, *_, attr_or_subscript: Any, **__):
self._saved_slice = attr_or_subscript
@register_raw_handler(TraceEvent._load_saved_slice, reentrant=True)
def _load_saved_slice(self, *_, **__):
ret = self._saved_slice
self._saved_slice = None
return ret
@classmethod
def is_outer_stmt(cls, node_or_id, exclude_outer_stmt_types=None):
node_id = node_or_id if isinstance(node_or_id, int) else id(node_or_id)
containing_stmt = cls.containing_stmt_by_id.get(node_id, None)
parent_stmt = cls.parent_stmt_by_id.get(
node_id if containing_stmt is None else id(containing_stmt), None
)
outer_stmts_to_consider = tuple(
{ast.If, ast.Try, ast.With, ast.AsyncWith}
- (exclude_outer_stmt_types or set())
)
while parent_stmt is not None and isinstance(
| |
# -*- coding: utf-8 -*-
from applications.opentree.modules.opentreewebapputil import(
get_opentree_services_method_urls,
extract_nexson_from_http_call,
fetch_github_app_auth_token,
get_maintenance_info)
# N.B. This module is shared with tree-browser app, which is aliased as
# 'opentree'. Any name changes will be needed here as well!
from peyotl.manip import merge_otus_and_trees, iter_trees
import requests
from pprint import pprint
import json
#import pdb
# this file is released into the public domain and you can use it without limitations
import re
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
def index():
"""
Show an introduction page for visitors, or personalized curation dashboard for
a logged-in user.
"""
#response.flash = T("Welcome to web2py!")
view_dict = get_opentree_services_method_urls(request)
view_dict['maintenance_info'] = get_maintenance_info(request)
if False: ## auth.is_logged_in():
# user is logged in, bounce to their personal dashboard
redirect(URL('dashboard'))
else:
# anonymous visitor, show a general info page
return view_dict
def collections():
"""
Show a filtered list of all tree collections in the system.
TODO: move to collection/index?
"""
view_dict = get_opentree_services_method_urls(request)
view_dict['maintenance_info'] = get_maintenance_info(request)
return view_dict
def error():
view_dict = get_opentree_services_method_urls(request)
return view_dict
@auth.requires_login()
def dashboard():
return dict(message="My Curation Activity")
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
def profile():
"""
shows a personalized profile for any user (default = the current logged-in user)
http://..../{app}/default/profile/[username]
"""
view_dict = get_opentree_services_method_urls(request)
view_dict['maintenance_info'] = get_maintenance_info(request)
# if the URL has a [username], try to load their information
if len(request.args) > 0:
# try to load a profile for the specified userid, using the GitHub API
specified_userid = request.args[0]
view_dict['userid'] = specified_userid
view_dict['active_user_found'] = False
# fetch the JSON for this user's activities
json_response = _fetch_github_api(verb='GET',
url='/users/{0}'.format(specified_userid))
error_msg = json_response.get('message', None)
view_dict['error_msg'] = error_msg
if error_msg:
# pass error to the page for display
print("ERROR FETCHING INFO FOR USERID: ", specified_userid)
print(error_msg)
view_dict['user_info'] = None
view_dict['opentree_activity'] = None
else:
# pass user info to the page for display
view_dict['user_info'] = json_response
activity = _get_opentree_activity(
userid=specified_userid,
username=view_dict['user_info'].get('name', specified_userid)
)
if activity:
view_dict['active_user_found'] = True
else:
view_dict['active_user_found'] = False
view_dict['error_msg'] = 'Not active in OpenTree'
view_dict['opentree_activity'] = activity
view_dict['is_current_user_profile'] = False
if view_dict['active_user_found'] == True and auth.is_logged_in():
current_userid = auth.user and auth.user.github_login or None
if specified_userid == current_userid:
view_dict['is_current_user_profile'] = True
return view_dict
else:
# No userid was provided in the URL. Instead, we should try to bounce to the
# current user's profile if they're logged in (or try to force a login).
if auth.is_logged_in():
current_userid = auth.user and auth.user.github_login or None
# redirect to the fully expanded profile URL
expanded_url = URL('curator', 'default', 'profile',
args=(current_userid,),
vars=request.vars)
redirect(expanded_url)
else:
# try to force a login and return here
redirect(URL('curator', 'user', 'login',
vars=dict(_next=URL(args=request.args, vars=request.vars))))
def _fetch_github_api(verb='GET', url=None, data=None):
# Wrapper for all (synchronous) calls to GitHub APIs
# 'verb' should be GET or POST (when in doubt, send GET headers below)
# 'url' should be root-relative (assume GitHub API)
# 'data' could be passed via GET [TODO] or POST
GH_BASE_URL = 'https://api.github.com'
# if the current user is logged in, use their auth token instead
USER_AUTH_TOKEN = auth.user and auth.user.github_auth_token or None
# Specify the media-type from GitHub, to freeze v3 API responses and get
# the comment body as markdown (vs. plaintext or HTML)
PREFERRED_MEDIA_TYPE = 'application/vnd.github.v3.raw+json, application/vnd.github.machine-man-preview+json'
# to get markdown AND html body, use 'application/vnd.github.v3.full+json'
if USER_AUTH_TOKEN:
auth_header_value = 'token %s' % USER_AUTH_TOKEN
else:
GITHUB_APP_INSTALLATION_TOKEN = fetch_github_app_auth_token(request)
auth_header_value = 'token %s' % GITHUB_APP_INSTALLATION_TOKEN
GH_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
GH_GET_HEADERS = {'Authorization': auth_header_value,
'Accept': PREFERRED_MEDIA_TYPE}
GH_POST_HEADERS = {'Authorization': auth_header_value,
'Content-Type': 'application/json',
'Accept': PREFERRED_MEDIA_TYPE}
url = '{0}{1}'.format(GH_BASE_URL, url)
if verb == 'POST':
resp = requests.post( url, headers=GH_POST_HEADERS)
else:
resp = requests.get( url, headers=GH_GET_HEADERS)
# Assume we always return JSON, even if it's an error message
return resp.json()
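# Hedged usage sketch (comment only, so no extra web2py action is exposed): the
# wrapper above expects a root-relative GitHub API path, e.g.
#   json_response = _fetch_github_api(verb='GET', url='/users/{0}'.format(userid))
#   error_msg = json_response.get('message', None)  # GitHub reports errors via 'message'
# Note that the 'data' argument is currently not forwarded by either branch (see the
# TODO in the comments above).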
def _get_opentree_activity( userid=None, username=None ):
# Fetch information about a user's studies, comments, and collections in the
# OpenTree project. If a dict was provided, add this information to it; else
# bundle up the information and return it directly
if not userid:
return None
activity_found = False
activity = {
'curator_since': None,
'comments':[],
'issues': [],
'added_studies':[],
'curated_studies': [],
'curated_studies_in_synthesis': [],
'added_collections':[],
'curated_collections':[]
}
method_dict = get_opentree_services_method_urls(request)
# Use GitHub API to gather comments from this user, as shown in
# https://github.com/OpenTreeOfLife/feedback/issues/created_by/jimallman
# N.B. that this is limited to 100 most recent items!
all_comments = _fetch_github_api(verb='GET',
url='/repos/OpenTreeOfLife/feedback/issues/comments?per_page=100')
for comment in all_comments:
if comment.get('user', None):
comment_author = comment.get('user').get('login')
if comment_author == userid:
activity.get('comments').append( comment )
activity_found = True
# Again, for all feedback issues created by them
# N.B. that this is probably limited to 100 most recent items!
created_issues = _fetch_github_api(verb='GET',
url='/repos/OpenTreeOfLife/feedback/issues?state=all&creator={0}&per_page=100'.format(userid))
activity['issues'] = created_issues
if len(created_issues) > 0:
activity_found = True
# fetch a list of all studies that contribute to synthesis
fetch_url = method_dict['getSynthesisSourceList_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
# as usual, this needs to be a POST (pass empty fetch_args)
source_data = requests.post(
url=fetch_url,
headers={"Content-Type": "application/json"},
data=json.dumps({'include_source_list':True})
).json()
source_id_map = source_data.get('source_id_map')
# N.B. We can ignore the munged ids in source_data['source_list']
contributing_study_info = { } # build a dict with unique study IDs as keys, commit SHAs as values
for source_id, source_details in source_id_map.items():
if 'taxonomy' in source_details:
continue
study_id = source_details.get('study_id')
commit_SHA_in_synthesis = source_details.get('git_sha')
contributing_study_info[ study_id ] = commit_SHA_in_synthesis
# Use oti to gather studies curated and created by this user.
fetch_url = method_dict['findAllStudies_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
all_studies = requests.post(
url=fetch_url,
headers={"Content-Type": "application/json"},
data=json.dumps({'verbose':True}) # include curator list
).json().get('matched_studies', [ ])
for study in all_studies:
study_curators = study['ot:curatorName']
# TODO: improve oti to handle multiple curator names!
if type(study_curators) is not list:
study_curators = [study_curators]
# NB - If there's no "display name" defined, look for their userid
if (username or userid) in study_curators:
activity_found = True
activity['curated_studies'].append(study)
# first curator name is its original contributor
if study_curators[0] == (username or userid):
activity['added_studies'].append(study)
# does this contribute to synthesis?
if study['ot:studyId'] in contributing_study_info:
activity['curated_studies_in_synthesis'].append(study)
# Use oti to gather collections curated and created by this user.
fetch_url = method_dict['findAllTreeCollections_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
all_collections = requests.get(url=fetch_url).json()
for collection in all_collections:
# gather all participants and check against their GitHub userids
if userid == collection.get('creator', {}).get('login', None):
activity_found = True
activity['added_collections'].append(collection)
contributor_ids = [c.get('login', None) for c in collection.get('contributors', [ ])]
if userid in contributor_ids:
activity_found = True
activity['curated_collections'].append(collection)
if activity_found:
try:
# search the repo stats (for each phylesystem shard!) for their earliest contribution
earliest_activity_date = None # TODO: make this today? or tomorrow? MAXTIME?
fetch_url = method_dict['phylesystem_config_url']
if fetch_url.startswith('//'):
# Prepend scheme to a scheme-relative URL
fetch_url = "https:%s" % fetch_url
phylesystem_config = requests.get( url=fetch_url ).json()
shard_list = phylesystem_config['shards']
# if GitHub is rebuilding stats cache for any shard, poke them all but ignore dates
rebuilding_cache = False
for shard in shard_list:
shard_name = shard['name']
shard_contributors = _fetch_github_api(verb='GET',
url='/repos/OpenTreeOfLife/{0}/stats/contributors'.format(shard_name))
if type(shard_contributors) is not list:
# Flag this, but try to fetch remaining shards (to nudge the cache)
rebuilding_cache = True
else:
for contrib_info in shard_contributors:
if contrib_info['author']['login'] == userid:
# look for the earliest week here
for week in contrib_info['weeks']:
if earliest_activity_date:
earliest_activity_date = min(earliest_activity_date, week['w'])
else:
earliest_activity_date = week['w']
break # skip any remaining records
if rebuilding_cache:
activity['curator_since'] = 'Generating data, please try again in a moment...'
elif not earliest_activity_date:
activity['curator_since'] = 'This user has not curated any studies.'
else:
# show a very approximate date (stats are just weekly)
from datetime import datetime
d = datetime.fromtimestamp(earliest_activity_date)
activity['curator_since'] = d.strftime("%B %Y")
except:
# probably JSONDecodeError due to misconfiguration of the | |
'''
## NAME:
ProteinAnalysis.py
## LANGUAGE & VERSION:
python 3.8.5
## AUTHORS:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
## DATE:
November, 2021.
## DESCRIPTION & LOGIC:
This script uses BioPython together with numpy, pandas, seaborn,
matplotlib and argparse to run a functional protein analysis
between a query protein and a series of proteins, in order to
assess the functional relatedness of the analyzed proteins to the query.
## USAGE:
python3 ProteinAnalysis.py [arguments] (python 3.8.5)
## ARGUMENTS & HELP:
Protein functional analysis and comparison
options:
-h, --help show this help message and exit
--int_matx_df Prints intersection-matrix dataframe
--means_df Prints means vector as dataframe.
--best Prints best match protein.
--heatmap Prints intersection-matrix heatmap.
-i INPUT [INPUT ...], --input INPUT [INPUT ...]
List of the proteins files paths, separated by
whitespace and comma
-d DISULFIDE, --disulfide DISULFIDE
Distance between S-S atoms. Write -1 to use the
default value: 8
-a ALPHA, --alpha ALPHA
Sequence pattern to search alpha helices. Use default
to search standard pattern
-b BETA, --beta BETA Sequence pattern to search beta sheets. Use default to
search standard pattern
-m MOTIF [MOTIF ...], --motif MOTIF [MOTIF ...]
Motif to search and minimal length of the motif
sequence
## INPUT - OUTPUT:
Input: List of protein files to be analyzed (-i,), distance between S-S atoms (-d),
sequence pattern to search alpha helices (-a), sequence pattern to search
beta sheets (-b), motif to search and minimal length of the motif (-m)
Output: Prints motifs found. Prints functional analysis statistics, depending on the analysis arguments
given (see ARGUMENTS & HELP). Saves the my_protein object as an instance of the ProtAnalysis class.
## EXAMPLES:
Input: (From Terminal)
python3 ProteinAnalysis.py -i 1kcw.pdb 1fat.pdb 3jbz.pdb 1kbe.pdb 4g68.pdb 1hp8.pdb
-d 2 -a default -b default -int_matx_df -means_df -best -heatmap
Query protein name as key (str): 1kbe
Output: (Std output)
{'name': '1kcw', 'num_bonds': 1, 'di_bonds': [[155, 181, 1.9980831, <Model id=0>, <Chain id=A>]]}
{'name': '1kcw', 'num_helix': 4, 'helix_seqs': [[['RIYHSHIDAPKD', 'KEKEKHIDRE', 'KVDKDNEDFQE', 'KVNKDDEEFIE'], <Model id=0>, <Chain id=A>]]}
(...)
{'name': '1hp8', 'num_bonds': 0, 'num_helix': 0, 'num_sheets': 1}
Proteins to be analyzed: [{'name': '1kcw', 'num_bonds': 1, 'num_helix': 4, 'num_sheets': 61}, {'name': '1fat', 'num_bonds': 0, 'num_helix': 4, 'num_sheets': 60}, {'name': '3jbz', 'num_bonds': 0, 'num_helix': 5, 'num_sheets': 58}, {'name': '1kbe', 'num_bonds': 0, 'num_helix': 0, 'num_sheets': 3}, {'name': '4g68', 'num_bonds': 0, 'num_helix': 3, 'num_sheets': 81}, {'name': '1hp8', 'num_bonds': 0, 'num_helix': 0, 'num_sheets': 1}]
Valid protein names are: ['1kcw', '1fat', '3jbz', '1kbe', '4g68', '1hp8']
Query protein name as key (str): 1kbe
-------- Starting analysis --------
Intersection matrix as np.array:
[[0. 0.04918033]
[0. 0.05 ]
[0. 0.05172414]
[0. 0. ]
[0. 0.03703704]
[0. 0.33333333]]
Intersection matrix's row means as np.array:
[0.02459016 0.025 0.02586207 0. 0.01851852 0.16666667]
Intersection matrix as dataframe:
num_helix num_sheets
1kcw 0.0 0.049180
1fat 0.0 0.050000
3jbz 0.0 0.051724
1kbe 0.0 0.000000
4g68 0.0 0.037037
1hp8 0.0 0.333333
Intersection matrix means vector as dataframe:
1kbe
1kcw 0.024590
1fat 0.025000
3jbz 0.025862
1kbe 0.000000
4g68 0.018519
1hp8 0.166667
Printing best match...
Best match 1kbe
1hp8 0.166667
Printing heatmap...
-------- Analysis completed --------
## SOFTWARE REQUIREMENTS:
python3
argparse
numpy
pandas
seaborn
matplotlib.pyplot
motifs.py
## FUNCTIONS: There are 8 functions, each one necessary
for a step in the analysis. Their documentation is in
their docstrings. Many other functions are imported as
motifs from <motifs.py>
## EXTRA COMMENTS:
This script imports the module motifs.py
## LAST MODIFICATION:
<NAME> & <NAME>: November, 2021. [Creation]
## SOURCE:
GitHub:
https://github.com/phabel-LD/python_classII/
https://github.com/daianna21/python_class/
'''
##################################################################
# Libraries
##################################################################
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import motifs as mo
##################################################################
# Functions
##################################################################
# Dictionary of single-letter amino acid codes, necessary for further analysis
aa_code = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
def comparison_intersection(protein_A, protein_B):
'''
This function makes the intersection comparison between two
proteins.
Parameters:
protein_A (dict): Dictionary with info from the first protein,
including motif frequencies.
protein_B (dict): Dictionary with info from the second protein,
including motif frequencies.
Return:
intersections (np.array): Array (vector) of pairwise intersection values.
'''
motifs_A = np.array(list(protein_A.keys()))
values_A = np.array(list(protein_A.values())[2:])
motifs_B = np.array(list(protein_B.keys()))
values_B = np.array(list(protein_B.values())[2:])
# Start Vector pairwise comparison
intersections = []
for index in range(0, len(values_A)):
# Avoid division by 0.
if max(values_A[index], values_B[index]) == 0:
intersection = 0
else:
intersection = min(values_A[index], values_B[index])/ max(values_A[index], values_B[index])
intersections.append(intersection)
# Intersection vector is ready
intersections = np.array(intersections)
return(intersections)
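# Hedged worked example (not part of the original module): a hypothetical helper
# showing the min/max ratio computed above on two protein dicts shaped like the ones
# used in this script ('name' and 'num_bonds' first, then the motif counts that are
# actually compared, since values are sliced from index 2 onwards).
def _example_intersection_sketch():
    protein_A = {'name': 'protA', 'num_bonds': 0, 'num_helix': 4, 'num_sheets': 60}
    protein_B = {'name': 'protB', 'num_bonds': 0, 'num_helix': 2, 'num_sheets': 3}
    # Per motif: helices -> min(4, 2) / max(4, 2) = 0.5, sheets -> min(60, 3) / max(60, 3) = 0.05
    return comparison_intersection(protein_A, protein_B)  # array([0.5 , 0.05])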
def comparison_mean(protein_A, protein_B):
'''
This function computes the comparison mean from the intersection analysis
of two proteins.
Parameters:
protein_A (dict): Dictionary with info from the first protein,
including motif frequencies.
protein_B (dict): Dictionary with info from the second protein,
including motif frequencies.
Return:
mean (np.array): Mean intersection value.
'''
intersections = comparison_intersection(protein_A, protein_B)
# Mean intersection value as numpy array for further analysis
mean = np.mean(intersections)
return(mean)
def intersections_matrix(proteins, query_prot):
'''
This function builds the intersection matrix (generalized comparison)
between a query protein and a list of proteins.
Parameters:
proteins (list): A list of dictionaries, each with info. of a protein.
query_prot (dict): A dictionary with the info. of protein to be analyzed.
Return:
comparison_matrix (np.array): Comparison matrix with intersection values.
'''
# Initialize matrix
comparison_matrix = []
# Analyze every query protein
for index in range(0, len(proteins)):
if (proteins[index] == query_prot):
intersection = np.zeros(len(comparison_intersection(proteins[index], query_prot)))
else:
intersection = comparison_intersection(proteins[index], query_prot)
comparison_matrix.append(intersection)
# Create numpy array for further analysis
comparison_matrix = np.array(comparison_matrix)
return(comparison_matrix)
def intersections_means(proteins, query_prot):
'''
This function takes the row means from an intersection matrix.
Parameters:
proteins (list): A list of dictionaries, each with info. of a protein.
query_prot (dict): A dictionary with the info. of protein to be analyzed.
Return:
means (np.array): Array of 1D with the row means.
'''
# Get matrix
matrix = intersections_matrix(proteins, query_prot)
# Get rows means
means = np.mean(matrix, axis = 1)
return(means)
def plot_heatmap(proteins, query_prot):
'''
This function plots a heatmap from the intersection matrix, once it has
been computed.
Parameters:
proteins (list): A list of dictionaries, each with info. of a protein.
query_prot (dict): A dictionary with the info. of protein to be analyzed.
Return: None
'''
# Get matrix
matrix = intersections_matrix(proteins, query_prot)
# Get x labels: motifs
x = [ motif for motif in list(query_prot.keys())[2:] ]
# Get y labels: proteins
y = [prot["name"] for prot in proteins]
# Get name of analyzed protein
query_name = query_prot["name"]
# Create Heatmap & plot
heatmap_matrix = sns.heatmap(matrix, cmap="YlGnBu",
xticklabels = x, yticklabels = y)
plt.title(f"{query_name} functional analysis")
plt.xlabel("Motifs")
plt.ylabel("Proteins")
plt.show()
return
def get_means_df(proteins, query_prot):
'''
This function takes the mean array from an intersection matrix and creates
a dataframe.
Parameters:
proteins (list): A list of dictionaries, each with info. of a protein.
query_prot (dict): A dictionary with the info. of protein to be analyzed.
Return:
means_df (pd.dataframe): Dataframe with the row means.
'''
# Get means
means = intersections_means(proteins, query_prot)
# Create dataframe
means_df = pd.DataFrame(means.T, columns = [query_prot["name"]])
# Update header and indexes
old_index = [x for x in means_df.index]
new_index = [prot["name"] for prot in proteins]
means_df = means_df.rename(index=dict(zip(old_index, new_index)))
return(means_df)
def intersect_matrix_df(proteins, query_prot):
'''
This function builds the intersection matrix (generalized comparison)
between a query protein and a list of proteins and creates a dataframe
from it.
Parameters:
proteins (list): A list of dictionaries, each with info. of a protein.
query_prot (dict): A dictionary with the info. of protein to be analyzed.
Return:
intersection_df (pd.dataframe): Dataframe of the comparison matrix.
'''
# Get intersection matrix
matrix = intersections_matrix(proteins, query_prot)
# Rename columns according to motif names
x = [ motif for motif in list(query_prot.keys())[2:] ]
# Create dataframe
intersection_df = pd.DataFrame(matrix, columns = x )
# Update indexes
old_index = [x for x in intersection_df.index]
new_index = [prot["name"] for prot in proteins]
intersection_df = intersection_df.rename(index=dict(zip(old_index, new_index)))
return(intersection_df)
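# Hedged sketch (an assumption about downstream use, not the author's code): with the
# dataframes above, the closest protein can be read off directly, e.g.
#   means_df = get_means_df(proteins, query_prot)
#   best_name = means_df[query_prot["name"]].idxmax()  # highest mean intersection
# which is consistent with the example output in the module docstring (best match
# 1hp8 for query 1kbe).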
def print_best_match(proteins, query_prot):
'''
This function determines the analyzed protein's best match as the protein with the
highest | |
Disconnected_Boiler_BG_capacity_heating_W = 0
Disconnected_Boiler_NG_share_heating = 0
Disconnected_Boiler_NG_capacity_heating_W = 0
Disconnected_FC_share_heating = 0
Disconnected_FC_capacity_heating_W = 0
Disconnected_GHP_share_heating = 0
Disconnected_GHP_capacity_heating_W = 0
Disconnected_VCC_to_AHU_share_cooling = 0
Disconnected_VCC_to_AHU_capacity_cooling_W = 0
Disconnected_VCC_to_ARU_share_cooling = 0
Disconnected_VCC_to_ARU_capacity_cooling_W = 0
Disconnected_VCC_to_SCU_share_cooling = 0
Disconnected_VCC_to_SCU_capacity_cooling_W = 0
Disconnected_VCC_to_AHU_ARU_share_cooling = 0
Disconnected_VCC_to_AHU_ARU_capacity_cooling_W = 0
Disconnected_VCC_to_AHU_SCU_share_cooling = 0
Disconnected_VCC_to_AHU_SCU_capacity_cooling_W = 0
Disconnected_VCC_to_ARU_SCU_share_cooling = 0
Disconnected_VCC_to_ARU_SCU_capacity_cooling_W = 0
Disconnected_VCC_to_AHU_ARU_SCU_share_cooling = 0
Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_AHU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_ARU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_ARU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_SCU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_SCU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling = 0
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W = 0
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling = 0
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W = 0
Disconnected_direct_expansion_to_AHU_share_cooling = 0
Disconnected_direct_expansion_to_AHU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_ARU_share_cooling = 0
Disconnected_direct_expansion_to_ARU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_SCU_share_cooling = 0
Disconnected_direct_expansion_to_SCU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_AHU_SCU_share_cooling = 0
Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_AHU_ARU_share_cooling = 0
Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_ARU_SCU_share_cooling = 0
Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W = 0
Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling = 0
Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W = 0
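# Hedged note with a small pandas sketch (added for clarity; paths and column names
# follow the patterns used below): every branch that follows reads a decentralized
# result CSV and keeps only the row flagged as the chosen configuration, e.g.
#   df = pd.read_csv(csv_path_from_locator)
#   dfBest = df[df["Best configuration"] == 1]
#   share = dfBest["VCC to AHU_ARU Share"].iloc[0]  # scalar share for one technology
# A share of 1 means that technology covers the whole load, so its "Nominal Power"
# column is taken as the installed capacity in watts.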
if network[i] == "0":
if config.optimization.isheating:
df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_heating(building_names[i]))
dfBest = df[df["Best configuration"] == 1]
Disconnected_Boiler_BG_share_heating = dfBest["BoilerBG Share"].iloc[0]
Disconnected_Boiler_NG_share_heating = dfBest["BoilerNG Share"].iloc[0]
Disconnected_FC_share_heating = dfBest["FC Share"].iloc[0]
Disconnected_GHP_share_heating = dfBest["GHP Share"].iloc[0]
if Disconnected_Boiler_BG_share_heating == 1:
Disconnected_Boiler_BG_capacity_heating_W = dfBest["Nominal Power"].iloc[0]
if Disconnected_Boiler_NG_share_heating == 1:
Disconnected_Boiler_NG_capacity_heating_W = dfBest["Nominal Power"].iloc[0]
if Disconnected_FC_share_heating == 1:
Disconnected_FC_capacity_heating_W = dfBest["Nominal Power"].iloc[0]
if Disconnected_GHP_share_heating == 1:
Disconnected_GHP_capacity_heating_W = dfBest["Nominal Power"].iloc[0]
if (Disconnected_FC_share_heating == 0 and Disconnected_Boiler_BG_share_heating == 0 and Disconnected_GHP_share_heating != 0 and Disconnected_Boiler_NG_share_heating != 0):
Disconnected_Boiler_NG_capacity_heating_W = dfBest["Nominal Power"].iloc[0] / Disconnected_Boiler_NG_share_heating
Disconnected_GHP_capacity_heating_W = dfBest["Nominal Power"].iloc[0] / Disconnected_GHP_share_heating
disconnected_capacity = dict(building_name=building_names[i],
Disconnected_Boiler_BG_share=Disconnected_Boiler_BG_share_heating,
Disconnected_Boiler_BG_capacity_W=Disconnected_Boiler_BG_capacity_heating_W,
Disconnected_Boiler_NG_share=Disconnected_Boiler_NG_share_heating,
Disconnected_Boiler_NG_capacity_W=Disconnected_Boiler_NG_capacity_heating_W,
Disconnected_FC_share=Disconnected_FC_share_heating,
Disconnected_FC_capacity_W=Disconnected_FC_capacity_heating_W,
Disconnected_GHP_share=Disconnected_GHP_share_heating,
Disconnected_GHP_capacity_W=Disconnected_GHP_capacity_heating_W,
Disconnected_VCC_to_AHU_share_cooling=Disconnected_VCC_to_AHU_share_cooling,
Disconnected_VCC_to_AHU_capacity_cooling_W=Disconnected_VCC_to_AHU_capacity_cooling_W,
Disconnected_VCC_to_ARU_share_cooling=Disconnected_VCC_to_ARU_share_cooling,
Disconnected_VCC_to_ARU_capacity_cooling_W=Disconnected_VCC_to_ARU_capacity_cooling_W,
Disconnected_VCC_to_SCU_share_cooling=Disconnected_VCC_to_SCU_share_cooling,
Disconnected_VCC_to_SCU_capacity_cooling_W=Disconnected_VCC_to_SCU_capacity_cooling_W,
Disconnected_VCC_to_AHU_ARU_share_cooling=Disconnected_VCC_to_AHU_ARU_share_cooling,
Disconnected_VCC_to_AHU_ARU_capacity_cooling_W=Disconnected_VCC_to_AHU_ARU_capacity_cooling_W,
Disconnected_VCC_to_AHU_SCU_share_cooling=Disconnected_VCC_to_AHU_SCU_share_cooling,
Disconnected_VCC_to_AHU_SCU_capacity_cooling_W=Disconnected_VCC_to_AHU_SCU_capacity_cooling_W,
Disconnected_VCC_to_ARU_SCU_share_cooling=Disconnected_VCC_to_ARU_SCU_share_cooling,
Disconnected_VCC_to_ARU_SCU_capacity_cooling_W=Disconnected_VCC_to_ARU_SCU_capacity_cooling_W,
Disconnected_VCC_to_AHU_ARU_SCU_share_cooling=Disconnected_VCC_to_AHU_ARU_SCU_share_cooling,
Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W=Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W,
Disconnected_single_effect_ACH_to_AHU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_ARU_share_FP_cooling=Disconnected_single_effect_ACH_to_ARU_share_FP_cooling,
Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_ARU_share_ET_cooling=Disconnected_single_effect_ACH_to_ARU_share_ET_cooling,
Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W,
Disconnected_direct_expansion_to_AHU_share_cooling=Disconnected_direct_expansion_to_AHU_share_cooling,
Disconnected_direct_expansion_to_AHU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_capacity_cooling_W,
Disconnected_direct_expansion_to_ARU_share_cooling=Disconnected_direct_expansion_to_ARU_share_cooling,
Disconnected_direct_expansion_to_ARU_capacity_cooling_W=Disconnected_direct_expansion_to_ARU_capacity_cooling_W,
Disconnected_direct_expansion_to_SCU_share_cooling=Disconnected_direct_expansion_to_SCU_share_cooling,
Disconnected_direct_expansion_to_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_SCU_share_cooling=Disconnected_direct_expansion_to_AHU_SCU_share_cooling,
Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_ARU_share_cooling=Disconnected_direct_expansion_to_AHU_ARU_share_cooling,
Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W,
Disconnected_direct_expansion_to_ARU_SCU_share_cooling=Disconnected_direct_expansion_to_ARU_SCU_share_cooling,
Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling=Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling,
Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W)
elif config.optimization.iscooling:
df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i], cooling_all_units))
dfBest = df[df["Best configuration"] == 1]
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling = dfBest["single effect ACH to AHU_ARU_SCU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_SCU_share_FP_cooling = dfBest["single effect ACH to SCU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling = dfBest["single effect ACH to AHU_ARU_SCU Share (ET)"].iloc[0]
Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling = dfBest["DX to AHU_ARU_SCU Share"].iloc[0]
Disconnected_VCC_to_AHU_ARU_share_cooling = dfBest["VCC to AHU_ARU Share"].iloc[0]
Disconnected_VCC_to_AHU_ARU_SCU_share_cooling = dfBest["VCC to AHU_ARU_SCU Share"].iloc[0]
Disconnected_VCC_to_SCU_share_cooling = dfBest["VCC to SCU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W = dfBest["Nominal Power single effect ACH to AHU_ARU_SCU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_SCU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W = dfBest["Nominal Power single effect ACH to SCU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W = dfBest["Nominal Power single effect ACH to AHU_ARU_SCU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling == 1:
Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W = dfBest["Nominal Power DX to AHU_ARU_SCU [W]"].iloc[0]
if Disconnected_VCC_to_AHU_ARU_share_cooling == 1:
Disconnected_VCC_to_AHU_ARU_capacity_cooling_W = dfBest["Nominal Power VCC to AHU_ARU [W]"].iloc[0]
if Disconnected_VCC_to_AHU_ARU_SCU_share_cooling == 1:
Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W = dfBest["Nominal Power VCC to AHU_ARU_SCU [W]"].iloc[0]
if Disconnected_VCC_to_SCU_share_cooling == 1:
Disconnected_VCC_to_SCU_capacity_cooling_W = dfBest["Nominal Power VCC to SCU [W]"].iloc[0]
disconnected_capacity = dict(building_name=building_names[i],
Disconnected_Boiler_BG_share=Disconnected_Boiler_BG_share_heating,
Disconnected_Boiler_BG_capacity_W=Disconnected_Boiler_BG_capacity_heating_W,
Disconnected_Boiler_NG_share=Disconnected_Boiler_NG_share_heating,
Disconnected_Boiler_NG_capacity_W=Disconnected_Boiler_NG_capacity_heating_W,
Disconnected_FC_share=Disconnected_FC_share_heating,
Disconnected_FC_capacity_W=Disconnected_FC_capacity_heating_W,
Disconnected_GHP_share=Disconnected_GHP_share_heating,
Disconnected_GHP_capacity_W=Disconnected_GHP_capacity_heating_W,
Disconnected_VCC_to_AHU_share_cooling=Disconnected_VCC_to_AHU_share_cooling,
Disconnected_VCC_to_AHU_capacity_cooling_W=Disconnected_VCC_to_AHU_capacity_cooling_W,
Disconnected_VCC_to_ARU_share_cooling=Disconnected_VCC_to_ARU_share_cooling,
Disconnected_VCC_to_ARU_capacity_cooling_W=Disconnected_VCC_to_ARU_capacity_cooling_W,
Disconnected_VCC_to_SCU_share_cooling=Disconnected_VCC_to_SCU_share_cooling,
Disconnected_VCC_to_SCU_capacity_cooling_W=Disconnected_VCC_to_SCU_capacity_cooling_W,
Disconnected_VCC_to_AHU_ARU_share_cooling=Disconnected_VCC_to_AHU_ARU_share_cooling,
Disconnected_VCC_to_AHU_ARU_capacity_cooling_W=Disconnected_VCC_to_AHU_ARU_capacity_cooling_W,
Disconnected_VCC_to_AHU_SCU_share_cooling=Disconnected_VCC_to_AHU_SCU_share_cooling,
Disconnected_VCC_to_AHU_SCU_capacity_cooling_W=Disconnected_VCC_to_AHU_SCU_capacity_cooling_W,
Disconnected_VCC_to_ARU_SCU_share_cooling=Disconnected_VCC_to_ARU_SCU_share_cooling,
Disconnected_VCC_to_ARU_SCU_capacity_cooling_W=Disconnected_VCC_to_ARU_SCU_capacity_cooling_W,
Disconnected_VCC_to_AHU_ARU_SCU_share_cooling=Disconnected_VCC_to_AHU_ARU_SCU_share_cooling,
Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W=Disconnected_VCC_to_AHU_ARU_SCU_capacity_cooling_W,
Disconnected_single_effect_ACH_to_AHU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_ARU_share_FP_cooling=Disconnected_single_effect_ACH_to_ARU_share_FP_cooling,
Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_ARU_share_ET_cooling=Disconnected_single_effect_ACH_to_ARU_share_ET_cooling,
Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_FP_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_FP_cooling_W,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_share_ET_cooling,
Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W=Disconnected_single_effect_ACH_to_AHU_ARU_SCU_capacity_ET_cooling_W,
Disconnected_direct_expansion_to_AHU_share_cooling=Disconnected_direct_expansion_to_AHU_share_cooling,
Disconnected_direct_expansion_to_AHU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_capacity_cooling_W,
Disconnected_direct_expansion_to_ARU_share_cooling=Disconnected_direct_expansion_to_ARU_share_cooling,
Disconnected_direct_expansion_to_ARU_capacity_cooling_W=Disconnected_direct_expansion_to_ARU_capacity_cooling_W,
Disconnected_direct_expansion_to_SCU_share_cooling=Disconnected_direct_expansion_to_SCU_share_cooling,
Disconnected_direct_expansion_to_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_SCU_share_cooling=Disconnected_direct_expansion_to_AHU_SCU_share_cooling,
Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_ARU_share_cooling=Disconnected_direct_expansion_to_AHU_ARU_share_cooling,
Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W,
Disconnected_direct_expansion_to_ARU_SCU_share_cooling=Disconnected_direct_expansion_to_ARU_SCU_share_cooling,
Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W,
Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling=Disconnected_direct_expansion_to_AHU_ARU_SCU_share_cooling,
Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W=Disconnected_direct_expansion_to_AHU_ARU_SCU_capacity_cooling_W)
else:
raise ValueError("the region is not specified correctly")
else:
DCN_unit_configuration = saved_dataframe_for_each_generation['DCN unit configuration'][index]
if DCN_unit_configuration == 1: # corresponds to AHU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'ARU_SCU'
df = pd.read_csv(
locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i],
decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_ARU_SCU_share_cooling = dfBest["DX to ARU_SCU Share"].iloc[0]
Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling = dfBest["single effect ACH to ARU_SCU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling = dfBest["single effect ACH to ARU_SCU Share (ET)"].iloc[0]
Disconnected_VCC_to_ARU_SCU_share_cooling = dfBest["VCC to ARU_SCU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_ARU_SCU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_FP_cooling_W = dfBest["Nominal Power single effect ACH to ARU_SCU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_ARU_SCU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_ARU_SCU_capacity_ET_cooling_W = dfBest["Nominal Power single effect ACH to ARU_SCU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_ARU_SCU_share_cooling == 1:
Disconnected_direct_expansion_to_ARU_SCU_capacity_cooling_W = dfBest["Nominal Power DX to ARU_SCU [W]"].iloc[0]
if Disconnected_VCC_to_ARU_SCU_share_cooling == 1:
Disconnected_VCC_to_ARU_SCU_capacity_cooling_W = dfBest["Nominal Power VCC to ARU_SCU [W]"].iloc[0]
if DCN_unit_configuration == 2: # corresponds to ARU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'AHU_SCU'
df = pd.read_csv(
locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i],
decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_AHU_SCU_share_cooling = dfBest["DX to AHU_SCU Share"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling = dfBest["single effect ACH to AHU_SCU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling = dfBest["single effect ACH to AHU_SCU Share (ET)"].iloc[0]
Disconnected_VCC_to_AHU_SCU_share_cooling = dfBest["VCC to AHU_SCU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_SCU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_FP_cooling_W = dfBest["Nominal Power single effect ACH to AHU_SCU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_SCU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_SCU_capacity_ET_cooling_W = dfBest["Nominal Power single effect ACH to AHU_SCU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_AHU_SCU_share_cooling == 1:
Disconnected_direct_expansion_to_AHU_SCU_capacity_cooling_W = dfBest["Nominal Power DX to AHU_SCU [W]"].iloc[0]
if Disconnected_VCC_to_AHU_SCU_share_cooling == 1:
Disconnected_VCC_to_AHU_SCU_capacity_cooling_W = dfBest["Nominal Power VCC to AHU_SCU [W]"].iloc[0]
if DCN_unit_configuration == 3: # corresponds to SCU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'AHU_ARU'
df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i], decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_AHU_ARU_share_cooling = dfBest["DX to AHU_ARU Share"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling = \
dfBest["single effect ACH to AHU_ARU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling = \
dfBest["single effect ACH to AHU_ARU Share (ET)"].iloc[0]
Disconnected_VCC_to_AHU_ARU_share_cooling = dfBest["VCC to AHU_ARU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_ARU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_FP_cooling_W = \
dfBest["Nominal Power single effect ACH to AHU_ARU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_ARU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_ARU_capacity_ET_cooling_W = \
dfBest["Nominal Power single effect ACH to AHU_ARU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_AHU_ARU_share_cooling == 1:
Disconnected_direct_expansion_to_AHU_ARU_capacity_cooling_W = \
dfBest["Nominal Power DX to AHU_ARU [W]"].iloc[0]
if Disconnected_VCC_to_AHU_ARU_share_cooling == 1:
Disconnected_VCC_to_AHU_ARU_capacity_cooling_W = \
dfBest["Nominal Power VCC to AHU_ARU [W]"].iloc[0]
if DCN_unit_configuration == 4: # corresponds to AHU + ARU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'SCU'
df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i], decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_SCU_share_cooling = dfBest["DX to SCU Share"].iloc[0]
Disconnected_single_effect_ACH_to_SCU_share_FP_cooling = \
dfBest["single effect ACH to SCU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_SCU_share_ET_cooling = \
dfBest["single effect ACH to SCU Share (ET)"].iloc[0]
Disconnected_VCC_to_SCU_share_cooling = dfBest["VCC to SCU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_SCU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_SCU_capacity_FP_cooling_W = \
dfBest["Nominal Power single effect ACH to SCU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_SCU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_SCU_capacity_ET_cooling_W = \
dfBest["Nominal Power single effect ACH to SCU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_SCU_share_cooling == 1:
Disconnected_direct_expansion_to_SCU_capacity_cooling_W = \
dfBest["Nominal Power DX to SCU [W]"].iloc[0]
if Disconnected_VCC_to_SCU_share_cooling == 1:
Disconnected_VCC_to_SCU_capacity_cooling_W = \
dfBest["Nominal Power VCC to SCU [W]"].iloc[0]
if DCN_unit_configuration == 5: # corresponds to AHU + SCU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'ARU'
df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i], decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_ARU_share_cooling = dfBest["DX to ARU Share"].iloc[0]
Disconnected_single_effect_ACH_to_ARU_share_FP_cooling = \
dfBest["single effect ACH to ARU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_ARU_share_ET_cooling = \
dfBest["single effect ACH to ARU Share (ET)"].iloc[0]
Disconnected_VCC_to_ARU_share_cooling = dfBest["VCC to ARU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_ARU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_ARU_capacity_FP_cooling_W = \
dfBest["Nominal Power single effect ACH to ARU (FP) [W]"].iloc[0]
if Disconnected_single_effect_ACH_to_ARU_share_ET_cooling == 1:
Disconnected_single_effect_ACH_to_ARU_capacity_ET_cooling_W = \
dfBest["Nominal Power single effect ACH to ARU (ET) [W]"].iloc[0]
if Disconnected_direct_expansion_to_ARU_share_cooling == 1:
Disconnected_direct_expansion_to_ARU_capacity_cooling_W = \
dfBest["Nominal Power DX to ARU [W]"].iloc[0]
if Disconnected_VCC_to_ARU_share_cooling == 1:
Disconnected_VCC_to_ARU_capacity_cooling_W = \
dfBest["Nominal Power VCC to ARU [W]"].iloc[0]
if DCN_unit_configuration == 6: # corresponds to ARU + SCU in the central plant, so the remaining load needs to be provided by the decentralized plant
decentralized_configuration = 'AHU'
df = pd.read_csv(
locator.get_optimization_disconnected_folder_building_result_cooling(building_names[i],
decentralized_configuration))
dfBest = df[df["Best configuration"] == 1]
Disconnected_direct_expansion_to_AHU_share_cooling = dfBest["DX to AHU Share"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_share_FP_cooling = \
dfBest["single effect ACH to AHU Share (FP)"].iloc[0]
Disconnected_single_effect_ACH_to_AHU_share_ET_cooling = \
dfBest["single effect ACH to AHU Share (ET)"].iloc[0]
Disconnected_VCC_to_AHU_share_cooling = dfBest["VCC to AHU Share"].iloc[0]
if Disconnected_single_effect_ACH_to_AHU_share_FP_cooling == 1:
Disconnected_single_effect_ACH_to_AHU_capacity_FP_cooling_W = \
dfBest["Nominal Power single effect ACH to AHU (FP) [W]"].iloc[0]
# Repository: NieR1711/Fire
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, bot_has_permissions
#from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
from fire.converters import Member, Role, TextChannel
import aiosqlite3
import functools
import datetime
import asyncio
import typing
import asyncpg
import json
import os
with open('config.json', 'r') as cfg:
config = json.load(cfg)
def isadmin(ctx):
"""Checks if the author is an admin"""
if str(ctx.author.id) not in config['admins']:
admin = False
else:
admin = True
return admin
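# Illustrative layout of config.json assumed by isadmin(); only the 'admins'
# key (a list of user-ID strings) is taken from the lookup above, the ID value
# below is a placeholder:
# {
#     "admins": ["123456789012345678"]
# }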
class Premium(commands.Cog, name="Premium Commands"):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.premiumGuilds = []
self.autoroles = {}
# self.reactroles = {}
self.joinroles = {}
self.rolepersists = {}
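# Assumed initialisation for the raid-message tracking used by the raidmsg
# command below (_setraidmsg writes to both); no other initialisation of
# these attributes appears in this excerpt.
self.raidmsgs = {}
self.msgraiders = {}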
async def loadPremiumGuilds(self):
self.premiumGuilds = []
query = 'SELECT * FROM premium;'
guilds = await self.bot.db.fetch(query)
for guild in guilds:
self.premiumGuilds.append(guild['gid'])
async def loadAutoroles(self):
self.autoroles = {}
query = 'SELECT * FROM settings;'
settings = await self.bot.db.fetch(query)
for s in settings:
if s['autorole'] != 0:
guild = s['gid']
self.autoroles[guild] = {
"role": s['autorole']
}
# async def loadReactroles(self):
# self.reactroles = {}
# query = 'SELECT * FROM settings;'
# settings = await self.bot.db.fetch(query)
# for s in settings:
# if s['reactroleid'] != 0:
# guild = s['gid']
# self.reactroles[guild] = {
# "role": s['reactroleid'],
# "message": s['reactrolemid'],
# "emote": s['reactroleeid']
# }
async def loadJoinRoles(self):
self.joinroles = {}
query = 'SELECT * FROM joinableranks;'
ranks = await self.bot.db.fetch(query)
for r in ranks:
guild = r['gid']
if guild not in self.joinroles:
self.joinroles[guild] = []
self.joinroles[guild].append(r['rid'])
async def loadRolePersist(self):
self.rolepersists = {}
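# built up below into {guild_id: {user_id: {"role": role_id}}}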
query = 'SELECT * FROM rolepersist;'
persists = await self.bot.db.fetch(query)
for p in persists:
guild = p['gid']
user = p['uid']
role = p['rid']
try:
self.rolepersists[guild][user] = {
"role": role
}
except KeyError:
self.rolepersists[guild] = {}
self.rolepersists[guild][user] = {
"role": role
}
async def cog_check(self, ctx: commands.Context):
"""
Local check, makes all commands in this cog premium only
"""
if ctx.guild.id in self.premiumGuilds:
return True
if await self.bot.is_team_owner(ctx.author):
return True
else:
return False
async def member_guild_check(self, member: discord.Member):
return True
# """
# Check if the guild from a member is premium
# """
# if member.guild.id in self.premiumGuilds:
# return True
# if await self.bot.is_team_owner(member):
# return True
# else:
# return False
@commands.Cog.listener()
async def on_ready(self):
await asyncio.sleep(10)
await self.loadPremiumGuilds()
await self.loadAutoroles()
# await self.loadReactroles()
await self.loadJoinRoles()
await self.loadRolePersist()
print('Premium functions loaded!')
@commands.command(name='loadpremium', description='Load premium data', hidden=True)
async def loadpremium(self, ctx):
'''PFXloadpremium'''
if await self.bot.is_team_owner(ctx.author):
await self.loadPremiumGuilds()
await self.loadAutoroles()
# await self.loadReactroles()
await self.loadJoinRoles()
await self.loadRolePersist()
await ctx.send('<a:fireSuccess:603214443442077708> Loaded data!')
else:
await ctx.send('no.')
# def gencrabrave(self, t, filename):
# clip = VideoFileClip("crabtemplate.mp4")
# text = TextClip(t[0], fontsize=48, color='white', font='Verdana')
# text2 = TextClip("____________________", fontsize=48, color='white', font='Verdana')\
# .set_position(("center", 210)).set_duration(15.4)
# text = text.set_position(("center", 200)).set_duration(15.4)
# text3 = TextClip(t[1], fontsize=48, color='white', font='Verdana')\
# .set_position(("center", 270)).set_duration(15.4)
#
# video = CompositeVideoClip([clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]).set_duration(15.4)
#
# video.write_videofile(filename, preset='superfast', verbose=False)
# clip.close()
# video.close()
#
# @commands.command(name='crabrave', description='Make a Crab Rave meme!', hidden=True)
# async def crabmeme(self, ctx, *, text: str):
# '''Limited to owner only (for now, it may return) due to this command using like 90% CPU'''
# if not await self.bot.is_team_owner(ctx.author):
# return
# if not '|' in text:
# raise commands.ArgumentParsingError('Text should be separated by |')
# if not text:
# raise commands.MissingRequiredArgument('You need to provide text for the meme')
# filename = str(ctx.author.id) + '.mp4'
# t = text.upper().replace('| ', '|').split('|')
# if len(t) != 2:
# raise commands.ArgumentParsingError('Text should have 2 sections, separated by |')
# if (not t[0] and not t[0].strip()) or (not t[1] and not t[1].strip()):
# raise commands.ArgumentParsingError('Cannot use an empty string')
# msg = await ctx.send('🦀 Generating Crab Rave 🦀')
# await self.loop.run_in_executor(None, func=functools.partial(self.gencrabrave, t, filename))
# meme = discord.File(filename, 'crab.mp4')
# await msg.delete()
# await ctx.send(file=meme)
# os.remove(filename)
@commands.command(name='autorole', description='Automatically add a role to a user when they join')
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
@commands.guild_only()
async def autorole(self, ctx, role: Role = None):
'''PFXautorole [<role name/id/mention>]\nUse command without role argument to disable'''
query = 'SELECT * FROM settings WHERE gid = $1;'
guildsettings = await self.bot.db.fetch(query, ctx.guild.id)
if guildsettings == []:
# await self.bot.db.execute(f'INSERT INTO settings (\"gid\") VALUES ({ctx.guild.id});')
# await self.bot.conn.commit()
con = await self.bot.db.acquire()
async with con.transaction():
query = 'INSERT INTO settings (\"gid\") VALUES ($1);'
await self.bot.db.execute(query, ctx.guild.id)
await self.bot.db.release(con)
if not role:
# await self.bot.db.execute(f'UPDATE settings SET autorole = 0 WHERE gid = {ctx.guild.id}')
# await self.bot.conn.commit()
con = await self.bot.db.acquire()
async with con.transaction():
query = 'UPDATE settings SET autorole = 0 WHERE gid = $1;'
await self.bot.db.execute(query, ctx.guild.id)
await self.bot.db.release(con)
try:
self.autoroles[ctx.guild.id] = None
except KeyError:
pass
return await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully disabled auto-role in {discord.utils.escape_mentions(ctx.guild.name)}')
else:
roleid = role.id
# await self.bot.db.execute(f'UPDATE settings SET autorole = {roleid} WHERE gid = {ctx.guild.id}')
# await self.bot.conn.commit()
con = await self.bot.db.acquire()
async with con.transaction():
query = 'UPDATE settings SET autorole = $1 WHERE gid = $2'
await self.bot.db.execute(query, roleid, ctx.guild.id)
await self.bot.db.release(con)
self.autoroles[ctx.guild.id] = {
"role": roleid
}
return await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully enabled auto-role in {discord.utils.escape_mentions(ctx.guild.name)}! All new members will receive the {discord.utils.escape_mentions(role.name)} role.')
# @commands.command(name='reactrole', description='Automatically add a role to a user when they react to a message')
# @has_permissions(manage_roles=True)
# @bot_has_permissions(manage_roles=True)
# @commands.guild_only()
# async def reactrole(self, ctx, role: Role = None, message: int = None, emote: typing.Union[int, str] = None):
# '''PFXautorole [<role name/id/mention> <message id> <emote>]\nUse command without arguments to disable'''
# query = 'SELECT * FROM settings WHERE gid = $1;'
# guildsettings = await self.bot.db.fetch(query, ctx.guild.id)
# if guildsettings == []:
# # await self.bot.db.execute(f'INSERT INTO settings (\"gid\") VALUES ({ctx.guild.id});')
# # await self.bot.conn.commit()
# con = await self.bot.db.acquire()
# async with con.transaction():
# query = 'INSERT INTO settings (\"gid\") VALUES ($1);'
# await self.bot.db.execute(query, ctx.guild.id)
# await self.bot.db.release(con)
# if not role:
# # await self.bot.db.execute(f'UPDATE settings SET (\"reactroleid\", \"reactrolemid\", \"reactroleeid\") = (0, 0, 0) WHERE gid = {ctx.guild.id}')
# # await self.bot.conn.commit()
# con = await self.bot.db.acquire()
# async with con.transaction():
# query = 'UPDATE settings SET (\"reactroleid\", \"reactrolemid\", \"reactroleeid\") = (0, 0, 0) WHERE gid = $1;'
# await self.bot.db.execute(query, ctx.guild.id)
# await self.bot.db.release(con)
# try:
# self.reactroles[ctx.guild.id] = None
# except KeyError:
# pass
# return await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully disabled reaction role in {discord.utils.escape_mentions(ctx.guild.name)}')
# else:
# try:
# msg = await ctx.channel.fetch_message(message)
# except:
# for channel in ctx.guild.text_channels:
# perms = ctx.guild.me.permissions_in(channel)
# try:
# msg = await channel.fetch_message(message)
# except:
# continue
# if not msg:
# raise commands.ArgumentParsingError('Missing Message ID')
# if not emote:
# raise commands.ArgumentParsingError('Missing Emote')
# roleid = role.id
# messageid = msg.id
# try:
# emote = int(emote)
# except Exception:
# emote = str(emote)
# if type(emote) == int:
# emoteid = discord.utils.get(self.bot.emojis, id=emote)
# if emoteid == None:
# raise commands.ArgumentParsingError('Can\'t find emote from ID.')
# else:
# emote = emoteid
# emoteid = emoteid.id
# elif type(emote) == str:
# emoteid = emote
# # await self.bot.db.execute(f'UPDATE settings SET (\"reactroleid\", \"reactrolemid\", \"reactroleeid\") = ({roleid}, {messageid}, \"{emoteid}\") WHERE gid = {ctx.guild.id}')
# # await self.bot.conn.commit()
# con = await self.bot.db.acquire()
# async with con.transaction():
# query = 'UPDATE settings SET (\"reactroleid\", \"reactrolemid\", \"reactroleeid\") = ($2, $3, $4) WHERE gid = $1;'
# await self.bot.db.execute(query, ctx.guild.id, roleid, messageid, emoteid)
# await self.bot.db.release(con)
# await msg.add_reaction(emote)
# self.reactroles[ctx.guild.id] = {
# "role": roleid,
# "message": messageid,
# "emote": emoteid
# }
# return await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully enabled reaction role in {discord.utils.escape_mentions(ctx.guild.name)}!')
@commands.command(name='antiraid', description='Configure the channel for antiraid alerts')
@commands.has_permissions(manage_channels=True)
@commands.bot_has_permissions(ban_members=True)
@commands.guild_only()
async def antiraid(self, ctx, channel: TextChannel = None):
if not channel:
con = await self.bot.db.acquire()
async with con.transaction():
mquery = 'UPDATE settings SET antiraid = $1 WHERE gid = $2;'
await self.bot.db.execute(mquery, 0, ctx.guild.id)
await self.bot.db.release(con)
settings = self.bot.get_cog('Settings')
await settings.loadSettings()
return await ctx.send(f'I\'ve reset the antiraid alert channel.')
else:
con = await self.bot.db.acquire()
async with con.transaction():
mquery = 'UPDATE settings SET antiraid = $1 WHERE gid = $2;'
await self.bot.db.execute(mquery, channel.id, ctx.guild.id)
await self.bot.db.release(con)
settings = self.bot.get_cog('Settings')
await settings.loadSettings()
return await ctx.send(f'Antiraid alerts will now be sent in {channel.mention}')
async def _setraidmsg(self, id: int, message: str):
self.raidmsgs[id] = message
await asyncio.sleep(300)
self.raidmsgs[id] = None
self.bot.dispatch('msgraid_attempt', self.bot.get_guild(id), self.msgraiders[id])
@commands.command(name='raidmsg', description='Set the raid message for the server. Anyone who says it will get banned')
@commands.has_permissions(ban_members=True)
@commands.bot_has_permissions(ban_members=True)
async def raidmsg(self, ctx, *, msg: str):
await ctx.message.delete()
await ctx.send(f'Raid message set! Anyone who sends that message in the next 5 minutes will be added to the list.\nI will alert you in your raid alerts channel with the list of raiders :)')
asyncio.get_event_loop().create_task(self._setraidmsg(ctx.guild.id, msg))
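# Flow implied above: the raid message is kept for 300 seconds by _setraidmsg,
# after which 'msgraid_attempt' is dispatched with whatever ended up in
# self.msgraiders for that guild; populating self.msgraiders is assumed to
# happen in a message listener that is not part of this excerpt.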
@commands.command(name='addrank', description='Add a role that users can join through the rank command.')
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
@commands.guild_only()
async def addrank(self, ctx, *, role: Role):
'''PFXaddrank <role>'''
# await self.bot.db.execute(f'INSERT INTO joinableranks (\"gid\", \"rid\") VALUES ({ctx.guild.id}, {role.id});')
# await self.bot.conn.commit()
try:
if role.id in self.joinroles[ctx.guild.id]:
return await ctx.send('<a:fireFailed:603214400748257302> You cannot add an existing rank.')
except Exception:
pass
con = await self.bot.db.acquire()
async with con.transaction():
query = 'INSERT INTO joinableranks (\"gid\", \"rid\") VALUES ($1, $2);'
await self.bot.db.execute(query, ctx.guild.id, role.id)
await self.bot.db.release(con)
try:
self.joinroles[ctx.guild.id].append(role.id)
except KeyError:
self.joinroles[ctx.guild.id] = []
self.joinroles[ctx.guild.id].append(role.id)
await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully added the rank {discord.utils.escape_mentions(role.name)}!')
logchannels = self.bot.get_cog("Settings").logchannels
logid = logchannels[ctx.guild.id] if ctx.guild.id in logchannels else None
if logid:
logch = ctx.guild.get_channel(logid['modlogs'])
if logch:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'Rank Added | {role.name}', icon_url=str(ctx.guild.icon_url))
embed.add_field(name='User', value=ctx.author.mention, inline=False)
embed.add_field(name='Role', value=f'{role.mention}', inline=False)
embed.set_footer(text=f'User ID: {ctx.author.id} | Role ID: {role.id}')
try:
await logch.send(embed=embed)
except Exception:
pass
return
@commands.command(name='delrank', description='Remove a rank from the list of joinable roles.')
@has_permissions(manage_roles=True)
@bot_has_permissions(manage_roles=True)
@commands.guild_only()
async def delrank(self, ctx, *, role: Role):
'''PFXdelrank <role>'''
# await self.bot.db.execute(f'DELETE FROM joinableranks WHERE rid = {role.id};')
# await self.bot.conn.commit()
con = await self.bot.db.acquire()
async with con.transaction():
query = 'DELETE FROM joinableranks WHERE rid = $1;'
await self.bot.db.execute(query, role.id)
await self.bot.db.release(con)
try:
self.joinroles[ctx.guild.id].remove(role.id)
except KeyError:
pass
await ctx.send(f'<a:fireSuccess:603214443442077708> Successfully removed the rank {discord.utils.escape_mentions(role.name)}!')
logchannels = self.bot.get_cog("Settings").logchannels
logid = logchannels[ctx.guild.id] if ctx.guild.id in logchannels else None
if logid:
logch = ctx.guild.get_channel(logid['modlogs'])
if logch:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'Rank Removed | {role.name}', icon_url=str(ctx.guild.icon_url))
embed.add_field(name='User', value=ctx.author.mention, inline=False)
embed.add_field(name='Role', value=f'{role.mention}', inline=False)
embed.set_footer(text=f'User ID: {ctx.author.id} | Role ID: {role.id}')
try:
await logch.send(embed=embed)
except Exception:
pass
return
@commands.command(name='rank', description='List all available ranks and join a rank', aliases=['ranks'])
@bot_has_permissions(manage_roles=True)
@commands.guild_only()
async def rank(self, ctx, *, role: Role = None):
'''PFXrank [<rank>]'''
if not role:
try:
ranks = self.joinroles[ctx.guild.id]
except KeyError:
return await ctx.send('<a:fireFailed:603214400748257302> Seems like there\'s no ranks set for this guild :c')
roles = []
someremoved = 0
for rank in ranks:
role = discord.utils.get(ctx.guild.roles, id=rank)
if not role:
# await self.bot.db.execute(f'DELETE FROM joinableranks WHERE rid = {rank};')
# await self.bot.conn.commit()
con = await self.bot.db.acquire()
async with con.transaction():
query = 'DELETE FROM joinableranks WHERE rid = $1;'
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import join, basename, exists
import six
import numpy as np
import utool as ut
print, rrr, profile = ut.inject2(__name__)
@six.add_metaclass(ut.ReloadingMetaclass)
class DataSet(ut.NiceRepr):
"""
helper class for managing dataset paths and general metadata
SeeAlso:
python -m wbia_cnn.ingest_data --test-get_wbia_part_siam_dataset --show
CommandLine:
python -m wbia_cnn.dataset DataSet
Example:
>>> from wbia_cnn.ingest_data import * # NOQA
>>> dataset = grab_mnist_category_dataset()
>>> dataset.print_dir_structure()
>>> # ----
>>> from wbia_cnn.models import MNISTModel
>>> model = MNISTModel(batch_size=128, data_shape=(24, 24, 1),
>>> output_dims=10, dataset_dpath=dataset.dataset_dpath)
>>> model.print_structure()
"""
def __init__(
dataset,
cfgstr=None,
training_dpath='.',
data_shape=None,
num_data=None,
name=None,
ext='.pkl',
):
dataset.name = name
dataset.cfgstr = cfgstr
dataset.training_dpath = training_dpath
assert data_shape is not None, 'must specify'
dataset._ext = ext
dataset._info = {
'num_data': num_data,
'data_shape': data_shape,
'num_labels': None,
'unique_labels': None,
'data_per_label': None,
}
# Dictionary for storing different data subsets
dataset.fpath_dict = {
'full': {
'data': dataset.data_fpath,
'labels': dataset.labels_fpath,
'metadata': dataset.metadata_fpath,
}
}
# Hacky dictionary for custom things
# Probably should be refactored
dataset._lazy_cache = ut.LazyDict()
def __nice__(dataset):
return '(' + dataset.dataset_id + ')'
@property
def hashid(dataset):
if dataset.cfgstr is None:
return ''
else:
return ut.hashstr27(dataset.cfgstr, hashlen=8)
@property
def dataset_id(dataset):
shape_str = 'x'.join(ut.lmap(str, dataset._info['data_shape']))
num_data = dataset._info['num_data']
parts = []
if dataset.name is not None:
parts.append(dataset.name)
if num_data is not None:
parts.append(str(num_data))
parts.append(shape_str)
if dataset.hashid:
parts.append(dataset.hashid)
dsid = '_'.join(parts)
return dsid
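# e.g. a dataset constructed with name='mnist', num_data=60000 and
# data_shape=(24, 24, 1) would yield an id like 'mnist_60000_24x24x1_<hashid>'
# (illustrative values; the hashid part is only present when cfgstr is set)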
@property
def dataset_dpath(dataset):
return join(dataset.training_dpath, dataset.dataset_id)
@property
def split_dpath(dataset):
split_dpath = join(dataset.dataset_dpath, 'splits')
return split_dpath
@property
def full_dpath(dataset):
return join(dataset.dataset_dpath, 'full')
@property
def info_fpath(dataset):
return join(dataset.full_dpath, '%s_info.json' % (dataset.hashid))
@property
def data_fpath(dataset):
return join(dataset.full_dpath, '%s_data%s' % (dataset.hashid, dataset._ext))
@property
def labels_fpath(dataset):
return join(dataset.full_dpath, '%s_labels%s' % (dataset.hashid, dataset._ext))
@property
def metadata_fpath(dataset):
return join(dataset.full_dpath, '%s_metadata%s' % (dataset.hashid, dataset._ext))
@classmethod
def new_training_set(cls, **kwargs):
dataset = cls(**kwargs)
# Define auxiliary data
try:
# dataset.build_auxillary_data()
dataset.ensure_symlinked()
dataset.save_alias(dataset.alias_key)
except Exception as ex:
ut.printex(ex, 'WARNING: was not able to generate splits or save alias')
return dataset
def hasprop(dataset, key):
return key in dataset._lazy_cache.keys()
def getprop(dataset, key, *d):
if len(d) == 0:
return dataset._lazy_cache[key]
else:
assert len(d) == 1
if key in dataset._lazy_cache:
return dataset._lazy_cache[key]
else:
return d[0]
def setprop(dataset, key, val):
dataset._lazy_cache[key] = val
def subset(dataset, key):
""" loads a test/train/valid/full data subset """
data = dataset.subset_data(key)
labels = dataset.subset_labels(key)
return data, labels
def print_subset_info(dataset, key='full'):
data, labels = dataset.subset(key)
dataset.print_dataset_info(data, labels, key)
@property
def data_shape(dataset):
data_shape = dataset._info['data_shape']
assert data_shape is not None, 'data_shape is unknown'
return data_shape
@property
def unique_labels(dataset):
unique_labels = dataset._info['unique_labels']
assert unique_labels is not None, 'unique_labels is unknown'
return unique_labels
@property
def labels(dataset):
return dataset.subset_labels()
@property
def data(dataset):
return dataset.subset_data()
@property
def metadata(dataset):
return dataset.subset_metadata()
def asdict(dataset):
# save all args passed into constructor as a dict
key_list = ut.get_func_argspec(dataset.__init__).args[1:]
data_dict = ut.dict_subset(dataset.__dict__, key_list)
return data_dict
@ut.memoize
def subset_data(dataset, key='full'):
data_fpath = dataset.fpath_dict[key]['data']
data = ut.load_data(data_fpath, verbose=True)
if len(data.shape) == 3:
# add channel dimension for implicit grayscale
data.shape = data.shape + (1,)
return data
@ut.memoize
def subset_labels(dataset, key='full'):
labels_fpath = dataset.fpath_dict[key]['labels']
labels = (
None if labels_fpath is None else ut.load_data(labels_fpath, verbose=True)
)
return labels
@ut.memoize
def subset_metadata(dataset, key='full'):
metadata_fpath = dataset.fpath_dict[key].get('metadata', None)
if metadata_fpath is not None:
flat_metadata = ut.load_data(metadata_fpath, verbose=True)
else:
flat_metadata = None
return flat_metadata
def clear_cache(dataset, key=None):
cached_func_list = [
dataset.subset_data,
dataset.subset_labels,
dataset.subset_metadata,
]
if key is None:
for cached_func in cached_func_list:
cached_func.cache.clear()
else:
for cached_func in cached_func_list:
if key in cached_func.cache:
del cached_func.cache[key]
@staticmethod
def print_dataset_info(data, labels, key):
labelhist = {key: len(val) for key, val in ut.group_items(labels, labels).items()}
stats_dict = ut.get_stats(data.ravel())
ut.delete_keys(stats_dict, ['shape', 'nMax', 'nMin'])
print('[dataset] Dataset Info: ')
print('[dataset] * Data:')
print('[dataset] %s_data(shape=%r, dtype=%r)' % (key, data.shape, data.dtype))
print(
'[dataset] %s_memory(data) = %r'
% (
key,
ut.get_object_size_str(data),
)
)
print(
'[dataset] %s_stats(data) = %s'
% (
key,
ut.repr2(stats_dict, precision=2),
)
)
print('[dataset] * Labels:')
print(
'[dataset] %s_labels(shape=%r, dtype=%r)'
% (key, labels.shape, labels.dtype)
)
print('[dataset] %s_label histogram = %s' % (key, ut.repr2(labelhist)))
def interact(dataset, key='full', **kwargs):
"""
python -m wbia_cnn --tf netrun --db mnist --ensuredata --show --datatype=category
python -m wbia_cnn --tf netrun --db PZ_MTEST --acfg ctrl --ensuredata --show
"""
from wbia_cnn import draw_results
# interact_func = draw_results.interact_siamsese_data_patches
interact_func = draw_results.interact_dataset
# Automatically infer which lazy properties are needed for the
# interaction.
kwarg_items = ut.recursive_parse_kwargs(interact_func)
kwarg_keys = ut.get_list_column(kwarg_items, 0)
interact_kw = {
key_: dataset.getprop(key_) for key_ in kwarg_keys if dataset.hasprop(key_)
}
interact_kw.update(**kwargs)
# TODO : generalize
data = dataset.subset_data(key)
labels = dataset.subset_labels(key)
metadata = dataset.subset_metadata(key)
return interact_func(
labels, data, metadata, dataset._info['data_per_label'], **interact_kw
)
def view_directory(dataset):
ut.view_directory(dataset.dataset_dpath)
vd = view_directory
def has_split(dataset, key):
return key in dataset.fpath_dict
def get_split_fmtstr(dataset, forward=False):
# Parse direction
parse_fmtstr = '{key}_{size:d}_{type_:w}{ext}'
if forward:
# hack, need to do actual parsing of the parser here
def parse_inverse_format(parse_fmtstr):
# if True:
# hack impl
return parse_fmtstr.replace(':w}', '}')
# else:
# # Try and make a better impl
# nestings = ut.parse_nestings(parse_fmtstr, only_curl=True)
# ut.recombine_nestings(nestings)
# Generate direction
fmtstr = parse_inverse_format(parse_fmtstr)
else:
fmtstr = parse_fmtstr
return fmtstr
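# Example filenames following this scheme (hypothetical key/size):
#   'train_7000_data.pkl', 'train_7000_labels.pkl', 'train_7000_metadata.pkl'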
def load_splitsets(dataset):
import parse
fpath_dict = {}
fmtstr = dataset.get_split_fmtstr(forward=False)
for fpath in ut.ls(dataset.split_dpath):
parsed = parse.parse(fmtstr, basename(fpath))
if parsed is None:
print('WARNING: invalid filename %r' % (fpath,))
continue
key = parsed['key']
type_ = parsed['type_']
splitset = fpath_dict.get(key, {})
splitset[type_] = fpath
fpath_dict[key] = splitset
# check validity of loaded data
for key, val in fpath_dict.items():
assert 'data' in val, 'subset missing data'
dataset.fpath_dict.update(**fpath_dict)
def load(dataset):
dataset.ensure_dirs()
dataset.ensure_symlinked()
if not exists(dataset.info_fpath):
raise IOError('dataset info manifest cache miss')
else:
dataset._info = ut.load_data(dataset.info_fpath)
if not exists(dataset.data_fpath):
raise IOError('dataset data cache miss')
dataset.load_splitsets()
# Hack
if not exists(dataset.fpath_dict['full']['metadata']):
dataset.fpath_dict['full']['metadata'] = None
def save(dataset, data, labels, metadata=None, data_per_label=1):
ut.save_data(dataset.data_fpath, data)
ut.save_data(dataset.labels_fpath, labels)
if metadata is not None:
ut.save_data(dataset.metadata_fpath, metadata)
else:
dataset.fpath_dict['full']['metadata'] = None
# cache the data because it is likely going to be used to define a
# splitset
dataset.subset_data.cache['full'] = data
dataset.subset_labels.cache['full'] = labels
dataset.subset_metadata.cache['full'] = metadata
# Infer the rest of the required data info
dataset._info['num_labels'] = len(labels)
try:
dataset._info['unique_labels'] = np.unique(labels)
except ValueError:
dataset._info['unique_labels'] = np.nan
dataset._info['data_per_label'] = data_per_label
ut.save_data(dataset.info_fpath, dataset._info)
def add_split(dataset, key, idxs):
print('[dataset] adding split %r' % (key,))
# Build subset filenames
ut.ensuredir(dataset.split_dpath)
ext = dataset._ext
fmtdict = dict(key=key, ext=ext, size=len(idxs))
fmtstr = dataset.get_split_fmtstr(forward=True)
splitset = {
type_: join(dataset.split_dpath, fmtstr.format(type_=type_, **fmtdict))
for type_ in ['data', 'labels', 'metadata']
}
# Partition data into the subset
part_dict = {
'data': dataset.data.take(idxs, axis=0),
'labels': dataset.labels.take(idxs, axis=0),
}
if dataset.metadata is not None:
taker = ut.partial(ut.take, index_list=idxs)
part_dict['metadata'] = ut.map_dict_vals(taker, dataset.metadata)
# Write splitset data to files
for type_ in part_dict.keys():
ut.save_data(splitset[type_], part_dict[type_])
# Register filenames with dataset
dataset.fpath_dict[key] = splitset
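# Hypothetical usage, given index arrays from some external splitter:
#   dataset.add_split('train', train_idxs)
#   dataset.add_split('test', test_idxs)
# afterwards dataset.subset('train') loads the partitioned data/labels.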
def ensure_symlinked(dataset):
"""
Creates a symlink to the training path in the training junction
"""
junction_dpath = get_juction_dpath()
dataset_dname = basename(dataset.dataset_dpath)
dataset_dlink = join(junction_dpath, dataset_dname)
if exists(dataset_dlink):
ut.delete(dataset_dlink)
ut.symlink(dataset.dataset_dpath, dataset_dlink)
def ensure_dirs(dataset):
ut.ensuredir(dataset.full_dpath)
ut.ensuredir(dataset.split_dpath)
def print_dir_structure(dataset):
print(dataset.training_dpath)
print(dataset.dataset_dpath)
print(dataset.data_fpath)
print(dataset.labels_fpath)
print(dataset.metadata_fpath)
print(dataset.info_fpath)
print(dataset.full_dpath)
print(dataset.split_dpath)
def print_dir_tree(dataset):
fpaths = ut.glob(dataset.dataset_dpath, '*', recursive=True)
print('\n'.join(sorted(fpaths)))
# def build_auxillary_data(dataset):
# # Make test train validatation sets
# data_fpath = dataset.data_fpath
# labels_fpath = dataset.labels_fpath
# metadata_fpath = dataset.metadata_fpath
# data_per_label = dataset.data_per_label
# split_names = ['train', 'test', 'valid']
# fractions = [.7, .2, .1]
# named_split_fpath_dict = ondisk_data_split(
# data_fpath, labels_fpath, metadata_fpath,
# data_per_label, split_names, fractions,
# )
# for key, val in named_split_fpath_dict.items():
# splitset = dataset.fpath_dict.get(key, {})
# splitset.update(**val)
# dataset.fpath_dict[key] = splitset
def get_alias_dict_fpath():
alias_fpath = join(get_juction_dpath(), 'alias_dict_v2.txt')
return alias_fpath
def get_juction_dpath():
r"""
Returns:
str: junction_dpath
CommandLine:
python -m wbia_cnn --tf get_juction_dpath --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia_cnn.dataset import * # NOQA
>>> junction_dpath = get_juction_dpath()
>>> result = ('junction_dpath = %s' % (str(junction_dpath),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> ut.vd(junction_dpath)
"""
junction_dpath = ut.ensure_app_resource_dir('wbia_cnn', 'training_junction')
# Hacks to keep junction clean
home_dlink = ut.truepath('~/training_junction')
if not exists(home_dlink):
ut.symlink(junction_dpath, home_dlink)
ut.remove_broken_links(junction_dpath)
return junction_dpath
def stratified_shuffle_split(y, fractions, rng=None, class_weights=None):
"""
modified from sklearn to make n splits instead of 2
"""
n_samples = len(y)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
# TODO: weighted version
# class_counts_ = np.array([sum([w.get(cx, 0) for w