ball_pixels = np.nonzero(ball_box)
paddle_pixels = np.nonzero(paddle_line)
nonlocal trigger_pulled
if action == 1:
trigger_pulled = True
if done:
trigger_pulled = False
if not trigger_pulled:
return 'N'
try:
# dim 1 of the ball pixels and dim 0 of the paddle pixels both index the horizontal axis
ball_x_center = (np.min(ball_pixels[1]) +
np.max(ball_pixels[1])) / 2.
paddle_x_center = (np.min(paddle_pixels[0]) +
np.max(paddle_pixels[0])) / 2.
# case where paddle is too far to the right
if ball_x_center - paddle_x_center < -limit and (action != 3):
return 1
# too far to the left
elif ball_x_center - paddle_x_center > limit and (action != 2):
return -1
else:
return 0
except ValueError:
return 'N'
def inv_translation_fn(token):
if token == 1:
return [0, 1, 2]
elif token == -1:
return [0, 1, 3]
if is_dense:
return SoftDenseConstraint('paddle_direction_dense_Breakout',
dfa_string,
reward_shaping,
translation_fn,
gamma=0.99)
return Constraint('paddle_direction_Breakout',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=inv_translation_fn)
@register('1d_dithering2_Breakout')
def one_d_dithering_breakout(is_hard, is_dense, reward_shaping, k=2):
with open("./baselines/constraint/constraints/1d_dithering.lisp"
) as dfa_file:
dfa_string = dfa_file.read()
if is_dense:
return SoftDenseConstraint('1d_dithering2_dense_Breakout',
dfa_string,
reward_shaping,
lambda obs, action, done: action,
gamma=0.99)
return Constraint('1d_dithering2_Breakout',
dfa_string,
is_hard,
reward_shaping,
lambda obs, action, done: action,
inv_translation_fn=lambda token: [token])
@register('1d_dithering2_SpaceInvaders')
def one_d_dithering_spaceinvaders(is_hard, is_dense, reward_shaping, k=2):
with open("./baselines/constraint/constraints/1d_dithering.lisp"
) as dfa_file:
dfa_string = dfa_file.read()
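# Presumed action semantics (the standard Atari SpaceInvaders action set: NOOP, FIRE,
# RIGHT, LEFT, RIGHTFIRE, LEFTFIRE): the dict below collapses the six actions onto the
# three movement tokens used by the dithering DFA -- 1 = no horizontal motion,
# 2 = move right, 3 = move left.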
translation_dict = dict([(0, 1), (1, 1), (2, 2), (3, 3), (4, 2), (5, 3)])
inv_translation_dict = {1: [0, 1], 2: [2, 4], 3: [3, 5]}
translation_fn = lambda obs, action, done: translation_dict[action]
inv_translation_fn = lambda token: inv_translation_dict[token]
if is_dense:
return SoftDenseConstraint('1d_dithering2_dense_SpaceInvaders',
dfa_string,
reward_shaping,
translation_fn,
gamma=0.99)
return Constraint('1d_dithering2_SpaceInvaders',
dfa_string,
is_hard,
reward_shaping,
translation_fn,
inv_translation_fn=inv_translation_fn)
def build_one_d_actuation(num_actions, k):
dfa_string_template = '(defdfa {name} (({input_symbols}) ({states}) {start_state} ({accepting_states})) ({transitions}))'
transition_template = '({initial_state} {target_state} {symbol})'
name = '1d_{k}_actuation'.format(k=k)
input_symbols = ' '.join(list(map(str, range(num_actions))))
states = ' '.join(list(map(str, range(num_actions * k +
1)))) # add one for the start state
start_state = 0
accepting_states = ' '.join(
[str(a * k) for a in range(1, num_actions + 1)])
transitions = []
for a in range(num_actions):
transitions.append(
transition_template.format(initial_state=0,
target_state=a * k + 1,
symbol=a))
for r in range(k - 1):
transitions.append(
transition_template.format(initial_state=a * k + r + 1,
target_state=a * k + r + 2,
symbol=a))
transitions = ' '.join(transitions)
dfa_string = dfa_string_template.format(name=name,
input_symbols=input_symbols,
states=states,
start_state=start_state,
accepting_states=accepting_states,
transitions=transitions)
return dfa_string
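# For illustration, build_one_d_actuation(2, k=2) produces a DFA string of the form:
#   (defdfa 1d_2_actuation ((0 1) (0 1 2 3 4) 0 (2 4)) ((0 1 0) (1 2 0) (0 3 1) (3 4 1)))
# i.e. the machine reaches an accepting state once the same action symbol has been seen
# k times in a row from the start state; symbols with no matching transition are
# presumably handled by the DFA implementation (e.g. by resetting to the start state).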
@register('1d_actuation4_Breakout')
def oned_actuation_breakout4(is_hard, is_dense, reward_shaping):
if is_dense:
return SoftDenseConstraint(
'1d_actuation_dense_breakout4',
build_one_d_actuation(4, k=4),
reward_shaping,
translation_fn=lambda obs, action, done: action,
gamma=0.99)
return Constraint('1d_actuation_breakout4',
build_one_d_actuation(4, k=4),
is_hard,
reward_shaping,
translation_fn=lambda obs, action, done: action,
inv_translation_fn=lambda token: [token])
@register('1d_actuation4_SpaceInvaders')
def oned_actuation_spaceinvaders4(is_hard, is_dense, reward_shaping):
translation_dict = dict([(0, 0), (1, 1), (2, 2), (3, 3), (4, 2), (5, 3)])
inv_translation_dict = {0: [0], 1: [1], 2: [2, 4], 3: [3, 5]}
translation_fn = lambda obs, action, done: translation_dict[action]
inv_translation_fn = lambda token: inv_translation_dict[token]
if is_dense:
return SoftDenseConstraint('1d_actuation_dense_SpaceInvaders',
build_one_d_actuation(4, k=4),
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('1d_actuation_SpaceInvaders',
build_one_d_actuation(4, k=4),
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=inv_translation_fn)
@register("2d_actuation4_Seaquest")
def twod_actuation4_seaquest(is_hard, is_dense, reward_shaping):
with open("./baselines/constraint/constraints/seaquest_actuation.lisp"
) as dfa_file:
dfa_string = dfa_file.read()
if is_dense:
return SoftDenseConstraint(
'2d_actuation4_dense_Seaquest',
dfa_string,
reward_shaping,
translation_fn=lambda obs, action, done: action,
gamma=0.99)
return Constraint('2d_actuation4_Seaquest',
dfa_string,
is_hard,
reward_shaping,
translation_fn=lambda obs, action, done: action,
inv_translation_fn=lambda token: [token])
@register("2d_dithering4_Seaquest")
def twod_dithering4_seaquest(is_hard, is_dense, reward_shaping):
with open("./baselines/constraint/constraints/seaquest_dithering.lisp"
) as dfa_file:
dfa_string = dfa_file.read()
if is_dense:
return SoftDenseConstraint(
'2d_dithering4_dense_Seaquest',
dfa_string,
reward_shaping,
translation_fn=lambda obs, action, done: action,
gamma=0.99)
return Constraint('2d_dithering4_Seaquest',
dfa_string,
is_hard,
reward_shaping,
translation_fn=lambda obs, action, done: action,
inv_translation_fn=lambda token: [token])
@register("proximity_pointgoal1")
def proximity_point_goal_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
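# In Safety Gym the pseudo-lidar readings grow toward 1 as an object gets closer, so the
# expression below appears to bucket the largest reading's excess over a 0.6 threshold
# into 0.04-wide bins (negative while everything is far, increasing as a hazard
# approaches). max() here is Python's builtin taking the largest element of the array.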
token = max(obs['hazards_lidar'] - 0.6) // 0.04
return token
if is_linear:
return LinearConstraint('proximity_PointGoal1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_PointGoal1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_PointGoal1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_cargoal1")
def proximity_car_goal_one(is_hard, is_dense, reward_shaping, is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
token = max(obs['hazards_lidar'] - 0.6) // 0.04
return token
if is_linear:
return LinearConstraint('proximity_CarGoal1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_CarGoal1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_CarGoal1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_doggogoal1")
def proximity_doggo_goal_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
token = max(obs['hazards_lidar'] - 0.6) // 0.04
return token
if is_linear:
assert False
if is_dense:
return SoftDenseConstraint('proximity_DoggoGoal1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_DoggoGoal1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_pointgoal2")
def proximity_point_goal_two(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
vases_token = max(obs['vases_lidar'] - 0.6) // 0.04
token = max(hazards_token, vases_token)
return token
if is_linear:
return LinearConstraint('proximity_PointGoal2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_PointGoal2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_PointGoal2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_cargoal2")
def proximity_car_goal_two(is_hard, is_dense, reward_shaping, is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
vases_token = max(obs['vases_lidar'] - 0.6) // 0.04
token = max(hazards_token, vases_token)
return token
if is_linear:
return LinearConstraint('proximity_CarGoal2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_CarGoal2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_CarGoal2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_doggogoal2")
def proximity_doggo_goal_two(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
vases_token = max(obs['vases_lidar'] - 0.6) // 0.04
token = max(hazards_token, vases_token)
return token
if is_linear:
assert False
if is_dense:
return SoftDenseConstraint('proximity_DoggoGoal2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_DoggoGoal2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_pointbutton1")
def proximity_point_button_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token)
return token
if is_linear:
return LinearConstraint('proximity_PointButton1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_PointButton1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_PointButton1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_carbutton1")
def proximity_car_button_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token)
return token
if is_linear:
return LinearConstraint('proximity_CarButton1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_CarButton1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_CarButton1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_doggobutton1")
def proximity_doggo_button_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token)
return token
if is_linear:
return False
if is_dense:
return SoftDenseConstraint('proximity_DoggoButton1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_DoggoButton1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_pointbutton2")
def proximity_point_button_two(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token)
return token
if is_linear:
return LinearConstraint('proximity_PointButton2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_PointButton2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_PointButton2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_carbutton2")
def proximity_car_button_two(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token, 0)
return token
if is_linear:
return LinearConstraint('proximity_CarButton2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_CarButton2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_CarButton2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_doggobutton2")
def proximity_doggo_button_two(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
gremlins_token = max(obs['gremlins_lidar'] - 0.6) // 0.04
wrong_button_token = max(obs['buttons_lidar'] - obs['goal_lidar'] -
0.6) // 0.04
token = max(hazards_token, gremlins_token, wrong_button_token)
return token
if is_linear:
return False
if is_dense:
return SoftDenseConstraint('proximity_DoggoButton2',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_DoggoButton2',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
@register("proximity_pointpush1")
def proximity_point_push_one(is_hard,
is_dense,
reward_shaping,
is_linear=False):
with open("../constraint/constraints/proximity_highres.lisp") as dfa_file:
dfa_string = dfa_file.read()
def translation_fn(obs, action, done):
hazards_token = max(obs['hazards_lidar'] - 0.6) // 0.04
token = hazards_token
return token
if is_linear:
return LinearConstraint('proximity_pointpush1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
if is_dense:
return SoftDenseConstraint('proximity_pointpush1',
dfa_string,
reward_shaping,
translation_fn=translation_fn,
gamma=0.99)
return Constraint('proximity_pointpush1',
dfa_string,
is_hard,
reward_shaping,
translation_fn=translation_fn,
inv_translation_fn=lambda token: [token])
#!/usr/bin/env python
# submit_to_anubis.py
################################################################################
# Script for submitting samples to the ANUBIS analysis service.
#
# Last change: 19/October/2009
# Contact: <EMAIL>
#
# Usage: submit_to_anubis.py [options] ANALYSIS_SUBJECT_1 ANALYSIS_SUBJECT_2 ...
# OR submit_to_anubis.py [options] -r DIRECTORY
#
# Options:
# -h, --help show this help message and exit
# -a ANALYSIS_TYPE, --analysis-type=ANALYSIS_TYPE
# specifies the type of ANALYSIS_SUBJECT. One of ['URL',
# 'FILE']. The default is FILE.
# -e EMAIL, --email=EMAIL
# specifies the recipient of the analysis result. As
# soon as the analysis-server has finished processing
# the file the generated report will be sent to the
# given emailaddress. You can choose to omit this
# argument if you do not want to receive an email-
# message containing the analysis-result
# -r, --recursive recursively submit all samples found
# -u USER, --user=USER the name of your user if any
# -p PASSWORD, --password=PASSWORD
# the correct password for your user
# --ssl connect via SSL
# -f, --force-analysis force Anubis to rerun the analysis for this
# sample even if a cached report exists. (Works only
# when the user has sufficient privileges.)
# --keep-files=KEEP_FILES
# specifies which result files to keep
# in addition to the profile and the xml-report.
# One of ['ALL', 'ANUBIS_LOG', 'NORMAL']. default is
# NORMAL. Requires
# sufficient user privileges.
# --anubis-date=ANUBIS_DATE
# changes the date in the virtual environment where the
# binary is executed to the specified date. Format: YYYY-MM-DD.
# Requires sufficient user privileges.
# --timeout=TIMEOUT specifies a different timeout value. Requires
# sufficient user privileges.
#
#
# ANALYSIS_SUBJECT_1/ANALYSIS_SUBJECT_2/...:
# Depending on the analysis-type parameter these arguments will
# be interpreted as the name of a file that shall be uploaded to
# anubis or an URL that shall be analyzed by Anubis.
# In case of analysis_type='FILE' it specifies the relative or
# absolute path to the file that will be
# sent to the analysis-server.
# If - is given for the filename the file is read from stdin.
# Note: On Windows you have to start the python interpreter with -u
# in order to have a stdin-stream in binary mode.
#
# The script returns 0 for successful submissions and values > 0 in case of
# an error.
#
# Example: python ./submit_to_anubis.py --email <EMAIL> testfile.exe
#
################################################################################
SECLAB_URL = "http://anubis.iseclab.org/submit.php"
SECLAB_RESULT_URL = "http://anubis.iseclab.org/?action=result&"
import sys
import optparse
import os
import time
import httplib, urllib, urlparse
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email import Encoders
from email.MIMEBase import MIMEBase
# the number of successfully submitted samples
num_success = 0
# the number of samples that we failed to submit
num_failed = 0
# stores the parsed command-line arguments
cmdline = None
# This function was copied from an email of <NAME> and
# afterwards modified
def httprequest(url, postdata={}, headers=None, ssl = False):
"""A urllib.urlopen() replacement for http://... that gets the
content-type right for multipart POST requests.
"url" is the http URL to open.
"postdata" is a dictionary describing data to post. If the dict is
empty (the default) a GET request is made, otherwise a POST
request is made. Each postdata item maps a string name to
either:
- a string value; or
- a file part specification of the form:
{"filename": <filename>, # file to load content from
"content": <content>, # (optional) file content
"headers": <headers>} # (optional) headers
<filename> is used to load the content (can be overridden by
<content>) and as the filename to report in the request.
<headers> is a dictionary of headers to use for the part.
Note: currently the file part content must be US-ASCII text.
"headers" is an optional dictionary of headers to send with the
request. Note that the "Content-Type" and "Content-Length"
headers are automatically determined.
The current urllib.urlopen() *always* uses:
Content-Type: application/x-www-form-urlencoded
for POST requests. This is incorrect if the postdata includes a file
to upload. If a file is to be posted the post data is:
Content-Type: multipart/form-data
This returns the response content if the request was successful
(HTTP code 200). Otherwise an IOError is raised.
For example, this invocation:
url = 'http://www.perl.org/survey.cgi'
postdata = {
"name": "<NAME>",
"email": "<EMAIL>",
"gender": "M",
"born": "1964",
"init": {"filename": "~/.profile"},
}
Inspiration: Perl's HTTP::Request module.
http://aspn.activestate.com/ASPN/Reference/Products/ActivePerl/site/lib/HTTP/Request/Common.html
"""
if not url.startswith("http://"):
raise "Invalid URL, only http:// URLs are allow: url='%s'" % url
if not headers:
headers = {}
if not postdata:
method = "GET"
body = None
else:
method = "POST"
# Determine if we require a multipart content-type: 'contentType'.
for part in postdata.values():
if isinstance(part, dict):
contentType = "multipart/form-data"
break
else:
contentType = "application/x-www-form-urlencoded"
headers["Content-Type"] = contentType
# Encode the post data: 'body'.
if contentType == "application/x-www-form-urlencoded":
body = urllib.urlencode(postdata)
elif contentType == "multipart/form-data":
message = MIMEMultipart(_subtype="form-data")
for name, value in postdata.items():
if isinstance(value, dict):
# Get content.
if "content" in value:
content = value["content"]
else:
fp = open(value["filename"], "rb")
content = fp.read()
part = MIMEBase('application', "octet-stream")
part.set_payload( content )
# Encoders.encode_base64(part)
# Add content-disposition header.
dispHeaders = value.get("headers", {})
if "Content-Disposition" not in dispHeaders:
#XXX Should be a case-INsensitive check.
part.add_header("Content-Disposition", "form-data",
name=name, filename=value["filename"])
for dhName, dhValue in dispHeaders.items():
part.add_header(dhName, dhValue)
else:
# Do not use ctor to set payload to avoid adding a
# trailing newline.
part = MIMEText(None)
part.set_payload(value, "us-ascii")
part.add_header("Content-Disposition", "form-data",
name=name)
message.attach(part)
message.epilogue = "" # Make sure body ends with a newline.
# Split off the headers block from the .as_string() to get
# just the message content. Also add the multipart Message's
# headers (mainly to get the Content-Type header _with_ the
# boundary attribute).
headerBlock, body = message.as_string().split("\n\n",1)
for hName, hValue in message.items():
headers[hName] = hValue
#print "XXX ~~~~~~~~~~~~ multi-part body ~~~~~~~~~~~~~~~~~~~"
#import sys
#sys.stdout.write(body)
#print "XXX ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
else:
raise "Invalid content-type: '%s'" % contentType
# Make the HTTP request and get the response.
# Precondition: 'url', 'method', 'headers', 'body' are all setup properly.
scheme, netloc, path, parameters, query, fragment = urlparse.urlparse(url)
if parameters or query or fragment:
raise "Unexpected URL form: parameters, query or fragment parts "\
"are not allowed: parameters=%r, query=%r, fragment=%r"\
% (parameters, query, fragment)
if ssl:
conn = httplib.HTTPSConnection(netloc)
else:
conn = httplib.HTTPConnection(netloc)
conn.request(method, path, body, headers)
response = conn.getresponse()
return response
def submit(analysis_type, analysis_subject, aux_files = [],
email_addr = None, user_name = None, user_password = <PASSWORD>,
force_analysis = False, keep_files = None,
anubis_date = None, timeout = None, ssl = False, dump_process = False):
"""
Submits the 'analysis_subject' to ANUBIS.
Returns the task_id of the created task.
"""
if len(aux_files) > 0:
assert(analysis_type == "file")
def get_anubis_error_msg(http_page):
lines = http_page.splitlines()
have_h1 = False
start_ind = 0
end_ind = 0
for line_no, line in enumerate(lines):
if line == '<h1 class="bodystart">Fatal Submission Error</h1>':
have_h1 = True
elif have_h1 and line == "<p>":
start_ind = line_no + 1
elif start_ind > 0 and line == "</p>":
end_ind = line_no - 1
error_msg = http_page
if start_ind > 0 and end_ind >= start_ind and end_ind < len(lines):
error_msg = "\n".join(lines[start_ind:end_ind+1])
# show the first 500 characters
return error_msg[:500]
try:
post_data = {}
if analysis_type == "FILE":
if (analysis_subject == "-"):
content = sys.stdin.read()
fn = "stdin"
else:
content = open(analysis_subject, "rb").read()
fn = analysis_subject
post_data['analysisType'] = 'file'
post_data["executable"] = {"content" : content, "filename" : fn}
elif analysis_type == "URL":
post_data['analysisType'] = 'url'
post_data['url'] = analysis_subject
else:
assert(False)
for c, af in enumerate(aux_files):
content = open(af, "rb").read()
post_data["aux_file[%s]" % c] = {"content" : content, "filename" : af}
if (email_addr):
post_data["notification"] = "email"
post_data["email"] = email_addr
else:
post_data["notification"] = "browser"
post_data["email"] = ""
if (user_name):
post_data["username"] = user_name
post_data["password"] = <PASSWORD>
if force_analysis:
post_data["force_analysis"] = "on"
if anubis_date:
post_data["anubis_date"] = anubis_date
if timeout:
post_data['timeout'] = str(timeout)
if keep_files:
post_data['keep_files_level'] = keep_files.lower()
if dump_process:
post_data["dump_process"] = "on"
response = httprequest(SECLAB_URL, post_data, ssl=ssl)
if response.status == 200 and response.getheader("taskid"):
return response.getheader('taskid')
error_code = response.getheader("AnubisAPI.error.result")
if not error_code:
# legacy code - do our best to find a reasonable explanation
# of what happened
error_code = get_anubis_error_msg(response.read())
print "Error submitting analysis subject '%s'." % (analysis_subject)
print "The anubis server replied: %s" % (error_code)
except IOError:
print "File does not exist!"
except Exception:
import traceback
traceback.print_exc()
return None
def submit_dir(directory,
email_addr = None, user_name = None, user_password = <PASSWORD>,
force_analysis = False,
# -*- encoding: utf-8 -*-
from . import FixtureTest
class CountryBoundaryTest(FixtureTest):
def test_boundary(self):
import dsl
z, x, y = (8, 0, 0)
# +---------------------+
# | |
# | aaaaaaa*cccccccc |
# | a b c |
# | a b c |
# | a b c |
# | *bbbbbbb c |
# | c c |
# | c c |
# | cccccccccccccccc |
# | |
# +---------------------+
#
# a = claimed boundary. ways in a boundary=claim relation. we should
# get a linestring geometry for the whole relation from osm2pgsql.
#
# b = "on the ground" boundary. ways in a boundary=administrative
# relation, but ways have disputed=yes, disputed_by=XX on them.
# we pick up each way as an individual geometry.
#
# c = accepted boundary. ways in a boundary=administrative relation,
# ways have no extra tagging on them. the ways in b/c relation
# give us a country polygon, from which we extract the boundary.
#
self.generate_fixtures(
# "a" relation gives us a linestring.
dsl.way(1, dsl.fit_in_tile(
z, x, y, 'LINESTRING(0.1 0.5, 0.1 0.9, 0.5 0.9)'), {
'admin_level': '2',
'boundary': 'claim',
'claimed_by': 'XX',
'source': 'openstreetmap.org',
'type': 'boundary',
'name': 'XX Claim',
}),
# "b" ways give us linestring(s)
dsl.way(2, dsl.fit_in_tile(
z, x, y, 'LINESTRING(0.1 0.5, 0.5 0.5, 0.5 0.9)'), {
'admin_level': '2',
'boundary': 'administrative',
'disputed': 'yes',
'disputed_by': 'XX',
'source': 'openstreetmap.org',
}),
# "b & c" ways + country relation give us a polygon => oriented
# boundary curve. we get an oriented boundary curve for each
# country boundary. (note: b & c together should be a closed ring).
dsl.way(3, dsl.fit_in_tile(
z, x, y,
'LINESTRING(0.5 0.9, 0.9 0.9, 0.9 0.1, 0.1 0.1, 0.1 0.5, '
'0.5 0.5, 0.5 0.9)'), {
'admin_level': '2',
'boundary': 'administrative',
'name': 'XX',
'source': 'openstreetmap.org',
'mz_boundary_from_polygon': True, # need this for hack
}),
dsl.way(4, dsl.fit_in_tile(
z, x, y,
'LINESTRING(0.5 0.9, 0.9 0.9, 0.9 0.1, 0.1 0.1, 0.1 0.5, '
'0.5 0.5, 0.5 0.9)'), {
'admin_level': '2',
'boundary': 'administrative',
'name': 'YY',
'source': 'openstreetmap.org',
'mz_boundary_from_polygon': True, # need this for hack
}),
# this is just here to turn off maritime boundaries for everything
# in this tile.
dsl.way(5, dsl.tile_box(z, x, y), {
'source': 'tilezen.org',
'maritime_boundary': True,
'min_zoom': 0,
'kind': 'maritime',
}),
)
# should get a non-disputed section of border
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'name:left': 'XX',
'name:right': 'YY',
'name': 'XX - YY',
})
# should get a section recognised only by XX
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'unrecognized_country',
'kind:xx': 'country',
'name': 'XX Claim',
# we should have dropped the claimed_by/disputed_by tags
'claimed_by': type(None),
'disputed_by': type(None),
})
# should get a section recognised _except_ by XX
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'kind:xx': 'unrecognized_country',
'name': 'XX - YY',
})
def test_claim(self):
# test that a claim by countries BB & CC (and recognised by DD) is
# only kind:country for those countries. everyone else's view is
# kind: unrecognized_country.
#
# TODO: recognized_by not working yet.
import dsl
z, x, y = (8, 0, 0)
self.generate_fixtures(
dsl.way(1, dsl.tile_diagonal(z, x, y), {
'admin_level': '2',
'boundary': 'claim',
'name': 'Extent of CC claim',
'claimed_by': 'CC',
'disputed_by': 'AA;DD',
}),
dsl.way(2, dsl.tile_diagonal(z, x, y), {
'admin_level': '2',
'boundary': 'claim',
'name': 'Extent of BB claim',
'claimed_by': 'BB',
'disputed_by': 'AA',
'recognized_by': 'DD',
}),
dsl.way(3, dsl.tile_diagonal(z, x, y), {
'admin_level': '3',
'boundary': 'administrative',
'name': 'Region Name',
'type': 'boundary',
}),
dsl.way(4, dsl.tile_diagonal(z, x, y), {
'dispute': 'yes',
'disputed_by': 'AA',
'name': 'BB claim', # note: also CC claim?!
}),
)
self.assert_has_feature(
z, x, y, 'boundaries', {
# generally unrecognised
'kind': 'unrecognized_country',
# but BB & CC both claim this as a border
'kind:bb': 'country',
'kind:cc': 'country',
# AA disputes that this border exists. NOTE: the kind:aa is
# added to the output even though it duplicates the kind. this
# is to help with multi-level fallback. see
# https://github.com/tilezen/vector-datasource/pull/1895#discussion_r283912502
'kind:aa': 'unrecognized_country',
# DD recognizes BB's claim, so should see this as a country.
'kind:dd': 'country',
})
# because AA disputes the whole boundary (way 4), all the boundaries
# should be unrecognized (either by default or kind:aa).
with self.features_in_tile_layer(z, x, y, 'boundaries') as features:
for feature in features:
# calculate fallback: prefer kind:aa if it exists, else fall
# back to kind.
props = feature['properties']
kind = props.get('kind')
kind_aa = props.get('kind:aa', kind)
self.assertTrue(kind_aa is not None)
self.assertTrue(kind_aa.startswith('unrecognized_'))
def test_whole_claim(self):
# test that something where XA claims the whole boundary of XB works
# as expected - we get a boundary where XB is a country except in
# XA's viewpoint and a second boundary feature where the whole thing
# is just a boundary of XA in XA's viewpoint.
import dsl
z, x, y = 8, 0, 0
#
# +----------------+
# | |
# | aaaaaaaaaaa |
# | a a |
# | a a |
# | a a |
# | a a |
# | aaaaaaaaaaa |
# | |
# +----------------+
#
# this is mapped in OSM using 3 different elements:
#
# 1. a country boundary relation for XB
# 2. the ways making up the country boundary relation are tagged
# disputed=yes, disputed_by=XA
# 3. a claim relation with claimed_by=XA containing the same ways
# as the country boundary relation.
#
linestring = 'LINESTRING(0.1 0.1, 0.1 0.9, 0.9 0.9, 0.9 0.1, 0.1 0.1)'
self.generate_fixtures(
# country boundary relation gives us a linestring boundary
# extracted from the country polygon.
dsl.way(1, dsl.fit_in_tile(z, x, y, linestring), {
'admin_level': '2',
'boundary': 'administrative',
'name': 'XB',
'source': 'openstreetmap.org',
'type': 'boundary',
'mz_boundary_from_polygon': True,
}),
# ways making up the country boundary tagged disputed
dsl.way(2, dsl.fit_in_tile(z, x, y, linestring), {
'disputed': 'yes',
'disputed_by': 'XA',
'source': 'openstreetmap.org',
}),
# claim relation
dsl.way(3, dsl.fit_in_tile(z, x, y, linestring), {
'admin_level': '2',
'boundary': 'claim',
'claimed_by': 'XA',
'name': "Extent of XA's claim",
'source': 'openstreetmap.org',
}),
)
saw_xa = False
saw_xb = False
with self.features_in_tile_layer(z, x, y, 'boundaries') as features:
for feature in features:
props = feature['properties']
kind = props.get('kind')
if kind == 'disputed_reference_line':
# generally accepted viewpoint, XA should dispute
self.assertEqual(
props.get('kind:xa'), 'unrecognized')
self.assertEqual(props.get('name'), 'XB')
saw_xb = True
elif kind == 'disputed_claim':
# XA's viewpoint, which should claim it as part of XA
self.assertEqual(props.get('kind:xa'), 'country')
self.assertEqual(props.get('name'), "Extent of XA's claim")
saw_xa = True
self.assertTrue(saw_xa, "Expected to see XA's viewpoint boundary")
self.assertTrue(saw_xb, "Expected to see XB's country boundary")
class PlaceTest(FixtureTest):
def test_disputed_capital(self):
import dsl
z, x, y = 16, 0, 0
# a country capital which CN doesn't think is a country capital. this
# is just a test case, and any resemblance to real disputes, living or
# dead, is purely coincidental.
self.generate_fixtures(
dsl.way(1, dsl.tile_centre_shape(z, x, y), {
'name': 'Foo',
'featurecla': 'Admin-0 capital',
'fclass_cn': 'Admin-1 capital',
'scalerank': 4,
'min_zoom': 4,
'source': 'naturalearthdata.com',
}),
)
self.assert_has_feature(
z, x, y, 'places', {
'kind': 'locality',
'country_capital': type(True),
'country_capital:cn': type(False),
'region_capital:cn': type(True),
})
# NOTE: this isn't a test, just a factoring-out of common code used by all
# the subsequent tests, which are testing variations of what happens when
# the default (featurecla) is different from an override (fclass_iso).
def _check(self, default, dispute, expected):
import dsl
z, x, y = 16, 0, 0
self.generate_fixtures(
dsl.way(1, dsl.tile_centre_shape(z, x, y), {
'name': 'Foo',
'featurecla': default,
'fclass_iso': dispute,
'scalerank': 4,
'min_zoom': 4,
'source': 'naturalearthdata.com',
}),
)
expected['kind'] = 'locality'
self.assert_has_feature(z, x, y, 'places', expected)
def test_country_country(self):
# explicit override still comes out in the output
self._check(
'Admin-0 capital',
'Admin-0 capital',
{'country_capital:iso': type(True)}
)
def test_country_region(self):
# region override disables country_capital and adds region_capital.
self._check(
'Admin-0 capital',
'Admin-1 capital',
{
'country_capital:iso': type(False),
'region_capital:iso': type(True),
}
)
def test_country_none(self):
# override to none should just disable country_capital
self._check(
'Admin-0 capital',
'Populated place',
{'country_capital:iso': type(False)}
)
def test_region_country(self):
# override sets country_capital and disables region_capital
self._check(
'Admin-1 capital',
'Admin-0 capital',
{
'country_capital:iso': type(True),
'region_capital:iso': type(False),
}
)
def test_region_region(self):
# explicit override still comes out in the output
self._check(
'Admin-1 capital',
'Admin-1 capital',
{'region_capital:iso': type(True)}
)

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/ReflectionsOfLightByPlaneAndSphericalMirrors/reflections-of-light-by-plane-and-spherical-mirrors.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
from IPython.display import display, Math, Latex, HTML
HTML('''<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>''')
from helper import *
%matplotlib inline
# Reflection of Light
## by Plane and Spherical Mirrors
## Introduction
When light shines onto the surface of an object, some of the light is reflected, while the rest is either absorbed or transmitted. We can imagine the light consisting of many narrow beams that travel in straight-line paths called **rays**. The light rays that strike the surface are called the **incident rays**. The light rays that reflect off the surface are called the **reflected rays**. This model of light is called the **ray model**, and it can be used to describe many aspects of light, including the reflection and formation of images by plane and spherical mirrors.
## Law of Reflection
<img src="Images/law_of_reflection.svg" width="50%"/>
To measure the angles of the incident and reflected rays, we first draw the **normal**, which is the line perpendicular to the surface. The **angle of incidence, $\theta_{i}$,** is the angle between the incident ray and the normal. Likewise, the **angle of reflection, $\theta_{r}$,** is the angle between the reflected ray and the normal. The incident ray, the reflected ray, and the normal to the reflecting surface all lie within the same plane. This is shown in the figure above. Notice that the angle of reflection is equal to the angle of incidence. This is known as the **law of reflection**, and it can be expressed by the following equation:
$$\theta_{r} = \theta_{i}$$
Use the slider below to change the angle of incidence. This changes the angle between the incident ray and the normal. Notice how the angle of reflection also changes when the slider is moved.
interactive_plot = widgets.interactive(f, Angle=widgets.IntSlider(value=45,min=0,max=90,step=15,continuous_update=False))
output = interactive_plot.children[-1]
output.layout.height = '280px'
interactive_plot
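The same law can be written with vectors: if $\hat{d}$ is the direction of the incident ray and $\hat{n}$ is the unit normal of the surface, the reflected direction is $\hat{r} = \hat{d} - 2(\hat{d}\cdot\hat{n})\hat{n}$. The cell below is a minimal sketch using NumPy (the `reflect` helper is only for illustration and is not part of the notebook's helper module); it confirms that the reflected angle equals the incident angle.
import numpy as np

def reflect(d, n):
    """Reflect the direction vector d off a surface with unit normal n."""
    d = np.asarray(d, dtype=float)
    n = np.asarray(n, dtype=float)
    n = n / np.linalg.norm(n)
    return d - 2 * np.dot(d, n) * n

# incident ray arriving at 30 degrees from the normal (the normal points along +y)
theta_i = np.radians(30)
incident = np.array([np.sin(theta_i), -np.cos(theta_i)])
reflected = reflect(incident, np.array([0.0, 1.0]))
theta_r = np.degrees(np.arccos(np.dot(reflected, np.array([0.0, 1.0]))))
print(round(theta_r, 6))  # 30.0 -- the angle of reflection equals the angle of incidence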
**Question:** *When the angle of incidence increases, what happens to the angle of reflection?*
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = "The angle of reflection increases."
option_2 = "The angle of reflection decreases."
option_3 = "The angle of reflection remains constant."
option_4 = "The angle of reflection equals zero."
multiple_choice(option_1, option_2, option_3, option_4)
## Specular and Diffuse Reflections
For a very smooth surface, such as a mirror, almost all of the light is reflected to produce a **specular reflection**. In a specular reflection, the reflected light rays are parallel to one another and point in the same direction. This allows specular reflections to form images. If the surface is not very smooth, then the light may bounce off of the surface in various directions. This produces a **diffuse reflection**. Diffuse reflections cannot form images.
<img src="Images/specular_diffuse_reflections.svg" width="70%"/>
**Note:** The law of reflection still applies to diffuse reflections, even though the reflected rays are pointing in various directions. We can imagine that each small section of the rough surface is like a flat plane orientated differently than the sections around it. Since each of these sections is orientated differently, the angle of incidence is different at each section. This causes the reflected rays to scatter.
**Question:** *Which of the following is an example of a specular reflection?*
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = "The reflection off a clean window."
option_2 = "The reflection off a wooden deck."
option_3 = "The reflection off a carpet floor."
option_4 = "The reflection off a table cloth."
multiple_choice(option_1, option_2, option_3, option_4)
**Question:** *Which of the following is an example of a diffuse reflection?*
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = "The reflection off a concrete sidewalk."
option_2 = "The reflection off a mirror."
option_3 = "The reflection off the surface of a still lake."
option_4 = "The reflection off a polished sheet of metal."
multiple_choice(option_1, option_2, option_3, option_4)
## Image Formation by Plane Mirrors
A **plane mirror** is simply a mirror made from a flat (or planar) surface. These types of mirrors are commonly found in bedroom or bathroom fixtures. When an object is reflected in a plane mirror, the image of the object appears to be located behind the mirror. This is because our brains interpret the reflected light rays entering our eyes as having travelled in straight-line paths. The light rays entering our eyes simply do not contain enough information for our brains to differentiate between a straight-line path and a path that changed direction due to a reflection.
<img src="Images/plane_mirror_reflection.svg" width="60%"/>
Notice in the figure above that the light rays do not actually converge at the location where the image appears to be formed (behind the mirror). Since the light rays do not actually go behind the mirror, they are represented as projections using dashed lines. If a film were placed at the image location behind the mirror, it would not be able to capture the image. As a result, this type of image is called a **virtual image**.
For objects reflected in a plane mirror, the distance of the image from the mirror, $d_{i}$, is always equal to the distance of the object from the mirror, $d_{o}$. If the object is moved toward the mirror, the image of the object will also move toward the mirror such that the object and the image are always equidistant from the surface of the mirror.
Use the slider below to change the object distance. Notice how the image distance also changes when the slider is moved.
interactive_plot = widgets.interactive(f,Distance=widgets.IntSlider(value=30,min=10,max=50,step=10,continuous_update=False))
output = interactive_plot.children[-1]
output.layout.height = '280px'
interactive_plot
#Print question
distance = round(random.uniform(5,10),1)
print("If you stand " + str(distance) + " m in front of a plane mirror, how many metres behind the mirror is your virtual image?")
#Answer calculation
answer = distance
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(round((answer),1)) + " m"
option_2 = str(round((answer * 2),1)) + " m"
option_3 = str(round((answer / 2),1)) + " m"
option_4 = str(round((answer / 4),1)) + " m"
multiple_choice(option_1, option_2, option_3, option_4)
#Print question
distance = round(random.uniform(5,10),1)
print("If you stand " + str(distance) + " m in front of a plane mirror, how many metres will separate you from your virtual image?")
#Answer calculation
answer = (distance * 2)
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(round((answer),1)) + " m"
option_2 = str(round((answer * 2),1)) + " m"
option_3 = str(round((answer / 2),1)) + " m"
option_4 = str(round((answer / 4),1)) + " m"
multiple_choice(option_1, option_2, option_3, option_4)
## Spherical Mirrors
Two common types of curved mirror are formed from a section of a sphere. If the reflection takes place on the inside of the spherical section, then the mirror is called a **concave mirror**. The reflecting surface of a concave mirror curves inward and away from the viewer. If the reflection takes place on the outside of the spherical section, then the mirror is called a **convex mirror**. The reflecting surface of a convex mirror curves outward and toward the viewer.
<img src="Images/concave_convex_mirrors.svg" width="75%"/>
The **centre of curvature, $C$,** is the point located at the centre of the sphere used to create the mirror. The **vertex, $V$,** is the point located at the geometric centre of the mirror itself. The **focus, $F$,** is the point located midway between the centre of curvature and the vertex. The line passing through the centre of curvature and the vertex is called the **principal axis**. Notice that the focus also lies on the principal axis.
When an incident ray parallel to the principal axis strikes the mirror, the reflected ray always passes through the focus. When an incident ray passes through the focus and strikes the mirror, the reflected ray is always parallel to the principal axis. (In the above diagrams, reverse the arrow directions to see this case.) These properties make the focus particularly useful when examining spherical mirrors.
**Note:** The distance from the centre of curvature to the vertex is called the **radius of curvature**, $R$.
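Because the focus lies midway between the centre of curvature and the vertex, the focal length $f$ of a spherical mirror (for rays close to the principal axis) is half the radius of curvature: $f = \frac{R}{2}$. The short cell below is a quick illustrative sketch of this relationship; the `focal_length` helper is hypothetical and only for demonstration.
# focal length of a spherical mirror from its radius of curvature (paraxial approximation)
def focal_length(radius_of_curvature):
    return radius_of_curvature / 2.0

print(focal_length(24.0))  # a mirror with R = 24 cm has a focal length of 12 cm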
# ccm clusters
import os
import shutil
import time
import subprocess
import signal
import yaml
import uuid
import datetime
from six import print_
from distutils.version import LooseVersion
from ccmlib import common
from ccmlib.cluster import Cluster
from ccmlib.scylla_node import ScyllaNode
from ccmlib.node import NodeError
from ccmlib import scylla_repository
SNITCH = 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'
class ScyllaCluster(Cluster):
def __init__(self, path, name, partitioner=None, install_dir=None,
create_directory=True, version=None, verbose=False,
force_wait_for_cluster_start=False, manager=None, skip_manager_server=False, **kwargs):
install_func = common.scylla_extract_install_dir_and_mode
cassandra_version = kwargs.get('cassandra_version', version)
docker_image = kwargs.get('docker_image')
if cassandra_version:
self.scylla_reloc = True
self.scylla_mode = None
elif docker_image:
self.scylla_reloc = False
self.scylla_mode = None
else:
self.scylla_reloc = False
install_dir, self.scylla_mode = install_func(install_dir)
self.started = False
self.force_wait_for_cluster_start = (force_wait_for_cluster_start != False)
self._scylla_manager = None
self.skip_manager_server = skip_manager_server
self.scylla_version = cassandra_version
super(ScyllaCluster, self).__init__(path, name, partitioner,
install_dir, create_directory,
version, verbose,
snitch=SNITCH, cassandra_version=cassandra_version,
docker_image=docker_image)
if not manager:
scylla_ext_opts = os.getenv('SCYLLA_EXT_OPTS', "").split()
opts_i = 0
while opts_i < len(scylla_ext_opts):
if scylla_ext_opts[opts_i].startswith("--scylla-manager="):
manager = scylla_ext_opts[opts_i].split('=')[1]
opts_i += 1
if manager:
self._scylla_manager = ScyllaManager(self, manager)
if os.path.exists(os.path.join(self.get_path(), common.SCYLLAMANAGER_DIR)):
self._scylla_manager = ScyllaManager(self)
def load_from_repository(self, version, verbose):
install_dir, version = scylla_repository.setup(version, verbose)
install_dir, self.scylla_mode = common.scylla_extract_install_dir_and_mode(install_dir)
return install_dir, version
def create_node(self, name, auto_bootstrap, thrift_interface,
storage_interface, jmx_port, remote_debug_port,
initial_token, save=True, binary_interface=None):
return ScyllaNode(name, self, auto_bootstrap, thrift_interface,
storage_interface, jmx_port, remote_debug_port,
initial_token, save, binary_interface, scylla_manager=self._scylla_manager)
# copy from cluster
def __update_pids(self, started):
for node, p, _ in started:
node._update_pid(p)
def start_nodes(self, nodes=None, no_wait=False, verbose=False, wait_for_binary_proto=None,
wait_other_notice=None, jvm_args=None, profile_options=None,
quiet_start=False):
if wait_for_binary_proto is None:
wait_for_binary_proto = self.force_wait_for_cluster_start
if wait_other_notice is None:
wait_other_notice = self.force_wait_for_cluster_start
self.debug(f"start_nodes: no_wait={no_wait} wait_for_binary_proto={wait_for_binary_proto} wait_other_notice={wait_other_notice} force_wait_for_cluster_start={self.force_wait_for_cluster_start}")
self.started=True
p = None
if jvm_args is None:
jvm_args = []
marks = []
if wait_other_notice:
marks = [(node, node.mark_log()) for node in self.nodes.values() if node.is_running()]
if nodes is None:
nodes = self.nodes.values()
elif isinstance(nodes, ScyllaNode):
nodes = [nodes]
started = []
for node in nodes:
if not node.is_running():
if started:
last_node, _, last_mark = started[-1]
last_node.watch_log_for("node is now in normal status|Starting listening for CQL clients",
verbose=verbose, from_mark=last_mark,
process=last_node._process_scylla)
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
p = node.start(update_pid=False, jvm_args=jvm_args,
profile_options=profile_options, no_wait=no_wait,
wait_for_binary_proto=wait_for_binary_proto,
wait_other_notice=wait_other_notice)
started.append((node, p, mark))
marks.append((node, mark))
self.__update_pids(started)
for node, p, _ in started:
if not node.is_running():
raise NodeError("Error starting {0}.".format(node.name), p)
if wait_for_binary_proto:
for node, _, mark in started:
node.watch_log_for("Starting listening for CQL clients",
verbose=verbose, from_mark=mark)
if wait_other_notice:
for old_node, mark in marks:
for node, _, _ in started:
if old_node is not node:
old_node.watch_log_for_alive(node, from_mark=mark)
return started
# override cluster
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=None,
wait_other_notice=None, jvm_args=None, profile_options=None,
quiet_start=False):
kwargs = dict(**locals())
del kwargs['self']
started = self.start_nodes(**kwargs)
if self._scylla_manager and not self.skip_manager_server:
self._scylla_manager.start()
return started
def stop_nodes(self, nodes=None, wait=True, gently=True, wait_other_notice=False, other_nodes=None, wait_seconds=None):
if nodes is None:
nodes = self.nodes.values()
elif isinstance(nodes, ScyllaNode):
nodes = [nodes]
marks = []
if wait_other_notice:
if not other_nodes:
other_nodes = [node for node in self.nodes.values() if not node in nodes]
marks = [(node, node.mark_log()) for node in other_nodes if node.is_live()]
# stop all nodes in parallel
for node in nodes:
node.do_stop(gently=gently)
# wait for stopped nodes if needed
if wait or wait_other_notice:
for node in nodes:
node.wait_until_stopped(wait_seconds, marks, dump_core=gently)
return [node for node in nodes if not node.is_running()]
def stop(self, wait=True, gently=True, wait_other_notice=False, other_nodes=None, wait_seconds=None):
if self._scylla_manager and not self.skip_manager_server:
self._scylla_manager.stop(gently)
kwargs = dict(**locals())
del kwargs['self']
return self.stop_nodes(**kwargs)
def get_scylla_mode(self):
return self.scylla_mode
def is_scylla_reloc(self):
return self.scylla_reloc
def enable_internode_ssl(self, node_ssl_path, internode_encryption='all'):
shutil.copyfile(os.path.join(node_ssl_path, 'trust.pem'), os.path.join(self.get_path(), 'internode-trust.pem'))
shutil.copyfile(os.path.join(node_ssl_path, 'ccm_node.pem'), os.path.join(self.get_path(), 'internode-ccm_node.pem'))
shutil.copyfile(os.path.join(node_ssl_path, 'ccm_node.key'), os.path.join(self.get_path(), 'internode-ccm_node.key'))
node_ssl_options = {
'internode_encryption': internode_encryption,
'certificate': os.path.join(self.get_path(), 'internode-ccm_node.pem'),
'keyfile': os.path.join(self.get_path(), 'internode-ccm_node.key'),
'truststore': os.path.join(self.get_path(), 'internode-trust.pem'),
}
self._config_options['server_encryption_options'] = node_ssl_options
self._update_config()
def _update_config(self, install_dir=None):
"""
Add scylla-specific items to cluster.conf.
:return: None
"""
super(ScyllaCluster, self)._update_config()
filename = os.path.join(self.get_path(), 'cluster.conf')
with open(filename, 'r') as f:
data = yaml.safe_load(f)
if self.is_scylla_reloc():
data['scylla_version'] = self.scylla_version
if self._scylla_manager and self._scylla_manager.install_dir:
data['scylla_manager_install_path'] = self._scylla_manager.install_dir
with open(filename, 'w') as f:
yaml.safe_dump(data, f)
def sctool(self, cmd):
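# For example, cluster.sctool(['status']) (an illustrative subcommand) forwards the
# arguments to the manager's bundled sctool binary and returns its (stdout, stderr).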
if self._scylla_manager == None:
raise Exception("scylla manager not enabled - sctool command cannot be executed")
return self._scylla_manager.sctool(cmd)
def start_scylla_manager(self):
if not self._scylla_manager or self.skip_manager_server:
return
self._scylla_manager.start()
def stop_scylla_manager(self, gently=True):
if not self._scylla_manager or self.skip_manager_server:
return
self._scylla_manager.stop(gently)
def upgrade_cluster(self, upgrade_version):
"""
Supported only when using relocatable packages.
:param upgrade_version: relocatables name. Example: unstable/master:2020-11-18T08:57:53Z
"""
for node in self.nodelist():
node.upgrade(upgrade_version)
self._update_config(install_dir=self.nodelist()[0].node_install_dir)
class ScyllaManager:
def __init__(self, scylla_cluster, install_dir=None):
self.scylla_cluster = scylla_cluster
self._process_scylla_manager = None
self._pid = None
self.auth_token = <PASSWORD>(uuid.<PASSWORD>())
self.install_dir = install_dir
if install_dir:
if not os.path.exists(self._get_path()):
os.mkdir(self._get_path())
self._install(install_dir)
else:
self._update_pid()
@property
def version(self):
stdout, _ = self.sctool(["version"], ignore_exit_status=True)
version_string = stdout[stdout.find(": ") + 2:].strip() # Removing unnecessary information
version_code = LooseVersion(version_string)
return version_code
def _install(self, install_dir):
self._copy_config_files(install_dir)
self._copy_bin_files(install_dir)
self._update_config(install_dir)
def _get_api_address(self):
return "%s:5080" % self.scylla_cluster.get_node_ip(1)
def _update_config(self, install_dir=None):
conf_file = os.path.join(self._get_path(), common.SCYLLAMANAGER_CONF)
with open(conf_file, 'r') as f:
data = yaml.safe_load(f)
data['http'] = self._get_api_address()
if not 'database' in data:
data['database'] = {}
data['database']['hosts'] = [self.scylla_cluster.get_node_ip(1)]
data['database']['replication_factor'] = 3
if install_dir and (self.version < LooseVersion("2.5") or
LooseVersion('666') < self.version < LooseVersion('666.dev-0.20210430.2217cc84')):
data['database']['migrate_dir'] = os.path.join(install_dir, 'schema', 'cql')
if 'https' in data:
del data['https']
if 'tls_cert_file' in data:
del data['tls_cert_file']
if 'tls_key_file' in data:
del data['tls_key_file']
if not 'logger' in data:
data['logger'] = {}
data['logger']['mode'] = 'stderr'
if not 'repair' in data:
data['repair'] = {}
if self.version < LooseVersion("2.2"):
data['repair']['segments_per_repair'] = 16
data['prometheus'] = "{}:56091".format(self.scylla_cluster.get_node_ip(1))
# Changing port to 56091 since the manager and the first node share the same ip and 56090 is already in use
# by the first node's manager agent
data["debug"] = "{}:5611".format(self.scylla_cluster.get_node_ip(1))
# Since both the manager server and the first node use the same address, the manager can't use port
# 56112, as node 1's agent already seized it
if 'ssh' in data:
del data['ssh']
keys_to_delete = []
for key in data:
if not data[key]:
keys_to_delete.append(key) # can't delete from dict during loop
for key in keys_to_delete:
del data[key]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _copy_config_files(self, install_dir):
conf_dir = os.path.join(install_dir, 'etc')
if not os.path.exists(conf_dir):
raise Exception("%s is not a valid scylla-manager install dir" % install_dir)
for name in os.listdir(conf_dir):
filename = os.path.join(conf_dir, name)
if os.path.isfile(filename):
shutil.copy(filename, self._get_path())
def _copy_bin_files(self, install_dir):
os.mkdir(os.path.join(self._get_path(), 'bin'))
files = ['sctool', 'scylla-manager', 'scylla-manager-agent']
for name in files:
src = os.path.join(install_dir, name)
if not os.path.exists(src):
raise Exception("%s not found in scylla-manager install dir" % src)
shutil.copy(src,os.path.join(self._get_path(), 'bin', name))
@property
def is_agent_available(self):
return os.path.exists(os.path.join(self._get_path(), 'bin', 'scylla-manager-agent'))
def _get_path(self):
return os.path.join(self.scylla_cluster.get_path(), common.SCYLLAMANAGER_DIR)
def _get_pid_file(self):
return os.path.join(self._get_path(), "scylla-manager.pid")
def _update_pid(self):
if not os.path.isfile(self._get_pid_file()):
return
start = time.time()
pidfile = self._get_pid_file()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
if time.time() - start > 30.0:
print_("Timed out waiting for pidfile {} to be filled (current time is %s): File {} size={}".format(
pidfile,
datetime.now(),
'exists' if os.path.isfile(pidfile) else 'does not exist' if not os.path.exists(pidfile) else 'is not a file',
os.stat(pidfile).st_size if os.path.exists(pidfile) else -1))
break
else:
time.sleep(0.1)
try:
with open(self._get_pid_file(), 'r') as f:
self._pid = int(f.readline().strip())
except IOError as e:
raise NodeError('Problem starting scylla-manager due to %s' %
(e))
def start(self):
# some configurations are set post initialisation (cluster id) so
# we are forced to update the config prior to calling start
self._update_config(self.install_dir)
# check process is not running
if self._pid:
try:
os.kill(self._pid, 0)
return
except OSError as err:
pass
log_file = os.path.join(self._get_path(),'scylla-manager.log')
scylla_log = open(log_file, 'a')
if os.path.isfile(self._get_pid_file()):
os.remove(self._get_pid_file())
args = [os.path.join(self._get_path(), 'bin', 'scylla-manager'),
'--config-file', os.path.join(self._get_path(), 'scylla-manager.yaml')]
self._process_scylla_manager = subprocess.Popen(args, stdout=scylla_log,
stderr=scylla_log,
close_fds=True)
self._process_scylla_manager.poll()
with open(self._get_pid_file(), 'w') as pid_file:
pid_file.write(str(self._process_scylla_manager.pid))
api_interface = common.parse_interface(self._get_api_address(), 5080)
if not common.check_socket_listening(api_interface,timeout=180):
raise Exception("scylla manager interface %s:%s is not listening after 180 seconds, scylla manager may have failed to start."
% (api_interface[0], api_interface[1]))
return self._process_scylla_manager
def stop(self, gently):
if self._process_scylla_manager:
if gently:
try:
self._process_scylla_manager.terminate()
except OSError as e:
pass
else:
try:
self._process_scylla_manager.kill()
except OSError as e:
pass
else:
if self._pid:
signal_mapping = {True: signal.SIGTERM, False: signal.SIGKILL}
try:
os.kill(self._pid, signal_mapping[gently])
except OSError:
pass
def sctool(self, cmd, ignore_exit_status=False):
sctool = os.path.join(self._get_path(), 'bin', 'sctool')
args = [sctool, '--api-url', "http://%s/api/v1" % self._get_api_address()]
args += cmd
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = p.communicate()
exit_status = p.wait()
if exit_status != 0 and not ignore_exit_status:
raise Exception(" ".join(args), exit_status, stdout, stderr)
return stdout, stderr
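# Usage sketch (the command and variable names are illustrative, not from the source):
#   stdout, stderr = manager.sctool(["cluster", "list"])
# A non-zero exit status raises an Exception unless ignore_exit_status=True is passed.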
def agent_check_location(self, location_list, extra_config_file_list=None):
agent_bin | |
False
elif language == 'stick':
rhyctx.durations_stick = True
else:
raise ValueError("unknown durations behavior %s; try temporary or stick"
% language)
# Instruments
def add_sfx(self, words):
if len(words) != 4 or words[2] != 'on':
raise ValueError("must have 4 words: sfx SFXNAME on CHANNELTYPE")
_, name, _, channel = words
try:
channel = channeltypes[channel]
except KeyError:
raise ValueError("unknown channel; try pulse, triangle, or noise")
if self.cur_song is not None:
name = '::'.join((self.cur_song.name, name))
if name in self.sfxs:
raise ValueError("sfx %s was already defined on line %d"
% (name, self.sfxs[name].linenum))
inst = PentlySfx(channel, pitchctx=self.pitchctx,
name=name, linenum=self.linenum)
self.sfxs[name] = inst
self.cur_obj = ('sfx', inst)
def add_instrument(self, words):
if len(words) != 2:
raise ValueError("must have 2 words: instrument INSTNAME")
name = words[1]
if self.cur_song is not None:
name = '::'.join((self.cur_song.name, name))
if name in self.instruments:
raise ValueError("instrument %s was already defined on line %d"
% (name, self.instruments[name].linenum))
inst = PentlyInstrument(name=name, linenum=self.linenum)
self.instruments[name] = inst
self.cur_obj = ('instrument', inst)
def add_rate(self, words):
self.ensure_in_object('rate', 'sfx')
if len(words) != 2:
raise ValueError("must have 2 words: rate FRAMESPERSTEP")
rate = int(words[1])
self.cur_obj[1].set_rate(rate, linenum=self.linenum)
def add_volume(self, words):
self.ensure_in_object('volume', ('sfx', 'instrument'))
if len(words) < 2:
raise ValueError("volume requires at least one step")
obj = self.cur_obj[1]
obj.set_volume([int(x) for x in words[1:] if x != '|'],
linenum=self.linenum)
def add_decay(self, words):
self.ensure_in_object('decay', 'instrument')
if len(words) != 2:
raise ValueError("must have 2 words: decay UNITSPER16FRAMES")
obj = self.cur_obj[1]
obj.set_decay(int(words[1]), linenum=self.linenum)
def add_timbre(self, words):
self.ensure_in_object('timbre', ('sfx', 'instrument'))
if len(words) < 2:
raise ValueError("timbre requires at least one step")
obj = self.cur_obj[1]
obj.set_timbre(words[1:], linenum=self.linenum)
def add_pitch(self, words):
self.ensure_in_object('pitch', ('sfx', 'instrument'))
if len(words) < 2:
raise ValueError("pitch requires at least one step")
obj = self.cur_obj[1]
obj.set_pitch(words[1:], linenum=self.linenum)
def add_detached(self, words):
self.ensure_in_object('detached', 'instrument')
if len(words) > 1:
raise ValueError("detached in instrument takes no arguments")
self.cur_obj[1].set_detached(True)
def add_drum(self, words):
if len(words) not in (3, 4):
raise ValueError("must have 3 words: drum DRUMNAME")
self.cur_obj = None
drumname = words[1]
if self.cur_song is not None:
drumname = '::'.join((self.cur_song.name, drumname))
if drumname in self.drums:
raise ValueError("drum %s was already defined on line %d"
% (drumname, self.drums[drumname].linenum))
d = PentlyDrum(words[2:], name=drumname, linenum=self.linenum)
self.drums[drumname] = d
# Songs and patterns
def add_song(self, words):
if len(words) != 2:
raise ValueError("must have 2 words: song SONGNAME")
if self.cur_song:
raise ValueError("song %s began on line %d and was not ended with fine or dal segno"
% (self.cur_song.name, self.cur_song.linenum))
self.cur_obj = None
songname = words[1]
if songname in self.songs:
oldlinenum = self.songs[songname].linenum
raise ValueError("song %s was already defined on line %d"
% (songname, oldlinenum))
song = PentlySong(pitchctx=self.pitchctx, rhyctx=self.rhyctx,
name=songname, linenum=self.linenum)
self.cur_song = self.songs[songname] = song
def end_song(self, words):
if not self.cur_song:
raise ValueError("no song is open")
song = self.cur_song
words = ' '.join(words).lower()
if words == 'fine':
endcmd = 'fine'
elif words in ('dal segno', 'dalsegno'):
endcmd = 'dalSegno'
elif words in ('da capo', 'dacapo'):
if song.segno_linenum is not None:
raise ValueError("cannot loop to start because segno was set on line %d"
% song.segno_linenum)
endcmd = 'dalSegno'
else:
raise ValueError('song end must be "fine" or "dal segno" or "da capo", not '
+ words)
song.conductor.append(endcmd)
song.bytesize += 1
self.cur_song = self.cur_obj = None
def add_segno(self, words):
if len(words) > 1:
raise ValueError('segno takes no arguments')
if not self.cur_song:
raise ValueError("no song is open")
song = self.cur_song
if song.segno_linenum is not None:
raise ValueError('loop point for song %s was already set at line %d'
% (self.cur_song.name, song.segno_linenum))
song.conductor.append('segno')
song.bytesize += 1
song.segno_linenum = self.linenum
self.cur_obj = None
def add_time(self, words):
if len(words) < 2:
raise ValueError('no time signature given')
if len(words) > 2 and (len(words) != 4 or words[2] != 'scale'):
raise ValueError("time with scale must have 4 words: time N/D scale D")
try:
sp = timesignames[words[1].lower()]
except KeyError:
sp = words[1].split('/', 1)
if len(sp) != 2:
raise ValueError("time signature must be a fraction separated by /")
timenum, timeden = int(sp[0]), int(sp[1])
rhyctx = self.get_pitchrhy_parent().rhyctx
rhyctx.set_time_signature(timenum, timeden)
if len(words) > 2:
self.add_scale(words[2:])
def add_scale(self, words):
if len(words) != 2:
raise ValueError("must have 2 words: scale ROWVALUE")
target = self.get_pitchrhy_parent()
target.rhyctx.set_scale(int(words[1]))
def add_tempo(self, words):
if not self.cur_song:
raise ValueError("tempo must be used in a song")
if len(words) != 2:
raise ValueError("must have 2 words: pickup MEASURE[:BEAT[:ROW]]")
tempo = float(words[1])
if not 1.0 <= tempo <= 1500.0:
raise ValueError("tempo must be positive and no more than 1500 rows per minute")
song = self.cur_song
song.rhyctx.tempo = tempo # to be picked up on next wait rows
def add_song_wait(self, words):
if not self.cur_song:
raise ValueError("at must be used in a song")
if len(words) < 2:
raise ValueError("must have 2 words: at MEASURE[:BEAT[:ROW]]")
song = self.cur_song
mbr = [int(x) for x in words[1].split(':', 2)]
# If we're waiting at least one row, update the tempo and
# put in a wait command
rows_to_wait = song.rhyctx.wait_for_measure(*mbr)
song.wait_rows(rows_to_wait)
self.cur_obj = None # end any song-local pattern or instrument
if len(words) > 2:
self.dokeyword(words[2:])
def add_song_pickup(self, words):
if not self.cur_song:
raise ValueError("at must be used in a song")
if len(words) != 2:
raise ValueError("must have 2 words: pickup MEASURE[:BEAT[:ROW]]")
rhyctx = self.cur_song.rhyctx
mbr = [int(x) for x in words[1].split(':', 2)]
rhyctx.set_measure(*mbr)
@staticmethod
def extract_prepositions(words):
return dict(zip(words[2::2], words[3::2]))
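# Example of the slicing above (pattern/track/instrument names are made up):
#   words == ['play', 'melody', 'on', 'pulse2', 'with', 'lead']
# returns {'on': 'pulse2', 'with': 'lead'}; words[1] is handled by the caller.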
def add_attack(self, words):
if len(words) != 3 or words[1] != 'on':
raise ValueError('syntax: attack on CHANNELNAME')
if self.cur_song is None:
raise ValueError('no song is open')
self.cur_obj = None
chname = words[2]
song = self.cur_song
song.set_attack(chname)
def add_play(self, words):
if len(words) % 2 != 0:
raise ValueError('syntax: play PATTERNNAME [on TRACK] [with INSTRUMENT] [up SEMITONES] [down SEMITONES]')
if self.cur_song is None:
raise ValueError('no song is open')
self.cur_obj = None
patname = words[1]
pps = self.extract_prepositions(words)
track = pps.pop('on', None)
instrument = pps.pop('with', None)
transpose = int(pps.pop('up', 0)) - int(pps.pop('down', 0))
if pps:
raise ValueError("unknown prepositions: " + " ".join(pps))
song = self.cur_song
song.play(patname, track=track, instrument=instrument,
transpose=transpose)
def add_stop(self, words):
if self.cur_song is None:
raise ValueError('no song is open')
self.cur_obj = None
if len(words) < 2:
raise ValueError('must stop at least one track')
self.cur_song.stop_tracks(words[1:])
def add_pattern(self, words):
if len(words) % 2 != 0:
raise ValueError('syntax: pattern PATTERNNAME [on TRACK] [with INSTRUMENT]')
patname = words[1]
if patname in self.patterns:
raise ValueError("pattern %s was already defined on line %d"
% (patname, self.patterns[patname].linenum))
if self.cur_song is not None:
patname = '::'.join((self.cur_song.name, patname))
pitchrhy = self.cur_song
else:
pitchrhy = self
pps = self.extract_prepositions(words)
track = pps.pop('on', None)
if track and track not in pitched_tracks:
raise ValueError('unknown track ' + track)
instrument = pps.pop('with', None)
if pps:
raise ValueError("unknown prepositions: " + " ".join(pps))
pat = PentlyPattern(pitchctx=pitchrhy.pitchctx, rhyctx=pitchrhy.rhyctx,
instrument=instrument, track=track,
name=patname, linenum=self.linenum)
self.patterns[patname] = pat
self.cur_obj = ('pattern', pat)
def add_fallthrough(self, words):
if len(words) > 1:
raise ValueError("fallthrough takes no arguments")
self.ensure_in_object('fallthrough', 'pattern')
self.cur_obj[1].set_fallthrough(True)
self.cur_obj = None
def add_definition(self, name, value):
if name.startswith('EN'):
self.get_pitchrhy_parent().pitchctx.add_arp_name(name[2:], value)
return
raise ValueError("unknown definable %s" % repr(name))
keywordhandlers = {
'notenames': add_notenames,
'durations': add_durations,
'sfx': add_sfx,
'volume': add_volume,
'rate': add_rate,
'decay': add_decay,
'timbre': add_timbre,
'pitch': add_pitch,
'instrument': add_instrument,
'drum': add_drum,
'detached': add_detached,
'song': add_song,
'fine': end_song,
'dal': end_song,
'dalSegno': end_song,
'da': end_song,
'daCapo': end_song,
'segno': add_segno,
'time': add_time,
'scale': add_scale,
'attack': add_attack,
'pattern': add_pattern,
'fallthrough': add_fallthrough,
'at': add_song_wait,
'pickup': add_song_pickup,
'tempo': add_tempo,
'play': add_play,
'stop': add_stop,
}
def dokeyword(self, words):
if words[0].startswith('@'):
defmatch = ' '.join(words).split("=", 1)
if len(defmatch) > 1:
self.add_definition(defmatch[0][1:].rstrip(), defmatch[1].strip())
return
try:
kwh = self.keywordhandlers[words[0]]
except KeyError:
pass
else:
return kwh(self, words)
if self.cur_obj and self.cur_obj[0] == 'pattern':
pat = self.cur_obj[1]
for word in words:
pat.add_pattern_note(word)
return
if self.unk_keywords < 100:
print("unknown keyword %s inside %s"
% (repr(words), self.cur_obj or self.cur_song.name),
file=sys.stderr)
self.unk_keywords += 1
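# Example dispatch (the input line is made up): dokeyword(['tempo', '288']) looks up
# 'tempo' in keywordhandlers and calls add_tempo(self, ['tempo', '288']); unmatched
# words inside an open pattern are treated as pattern notes instead.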
# Pass 3: Try to find envelopes that overlap envelopes
def wrapdata(atoms, lineprefix, maxlength=79):
lpfx = len(lineprefix)
maxlength -= lpfx
out, lout = [], 0
for atom in atoms:
lthis = len(atom)
if len(out) > 0 and lthis + lout > maxlength:
yield lineprefix+','.join(out)
out, lout = [], 0
out.append(atom)
lout += lthis + 1
yield lineprefix+','.join(out)
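# Example with illustrative values: list(wrapdata(['1', '22', '333'], '  .byte ', 12))
# yields ['  .byte 1,22', '  .byte 333']; atoms are packed onto comma-separated lines
# that stay within maxlength characters, prefix included.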
def format_dbyt(n):
return '$%04x' % n
def print_all_dicts(parser):
print("\nParsed %d lines, with %d using a | |
Constraint(expr=m.x28*m.x2514 + m.x653*m.x2520 + m.x1278*m.x2526 + m.x1903*m.x2532 <= 8)
m.c1297 = Constraint(expr=m.x29*m.x2514 + m.x654*m.x2520 + m.x1279*m.x2526 + m.x1904*m.x2532 <= 8)
m.c1298 = Constraint(expr=m.x30*m.x2514 + m.x655*m.x2520 + m.x1280*m.x2526 + m.x1905*m.x2532 <= 8)
m.c1299 = Constraint(expr=m.x31*m.x2514 + m.x656*m.x2520 + m.x1281*m.x2526 + m.x1906*m.x2532 <= 8)
m.c1300 = Constraint(expr=m.x32*m.x2514 + m.x657*m.x2520 + m.x1282*m.x2526 + m.x1907*m.x2532 <= 8)
m.c1301 = Constraint(expr=m.x33*m.x2514 + m.x658*m.x2520 + m.x1283*m.x2526 + m.x1908*m.x2532 <= 8)
m.c1302 = Constraint(expr=m.x34*m.x2514 + m.x659*m.x2520 + m.x1284*m.x2526 + m.x1909*m.x2532 <= 8)
m.c1303 = Constraint(expr=m.x35*m.x2514 + m.x660*m.x2520 + m.x1285*m.x2526 + m.x1910*m.x2532 <= 8)
m.c1304 = Constraint(expr=m.x36*m.x2514 + m.x661*m.x2520 + m.x1286*m.x2526 + m.x1911*m.x2532 <= 8)
m.c1305 = Constraint(expr=m.x37*m.x2514 + m.x662*m.x2520 + m.x1287*m.x2526 + m.x1912*m.x2532 <= 8)
m.c1306 = Constraint(expr=m.x38*m.x2514 + m.x663*m.x2520 + m.x1288*m.x2526 + m.x1913*m.x2532 <= 8)
m.c1307 = Constraint(expr=m.x39*m.x2514 + m.x664*m.x2520 + m.x1289*m.x2526 + m.x1914*m.x2532 <= 8)
m.c1308 = Constraint(expr=m.x40*m.x2514 + m.x665*m.x2520 + m.x1290*m.x2526 + m.x1915*m.x2532 <= 8)
m.c1309 = Constraint(expr=m.x41*m.x2514 + m.x666*m.x2520 + m.x1291*m.x2526 + m.x1916*m.x2532 <= 8)
m.c1310 = Constraint(expr=m.x42*m.x2514 + m.x667*m.x2520 + m.x1292*m.x2526 + m.x1917*m.x2532 <= 8)
m.c1311 = Constraint(expr=m.x43*m.x2514 + m.x668*m.x2520 + m.x1293*m.x2526 + m.x1918*m.x2532 <= 8)
m.c1312 = Constraint(expr=m.x44*m.x2514 + m.x669*m.x2520 + m.x1294*m.x2526 + m.x1919*m.x2532 <= 8)
m.c1313 = Constraint(expr=m.x45*m.x2514 + m.x670*m.x2520 + m.x1295*m.x2526 + m.x1920*m.x2532 <= 8)
m.c1314 = Constraint(expr=m.x46*m.x2514 + m.x671*m.x2520 + m.x1296*m.x2526 + m.x1921*m.x2532 <= 8)
m.c1315 = Constraint(expr=m.x47*m.x2514 + m.x672*m.x2520 + m.x1297*m.x2526 + m.x1922*m.x2532 <= 8)
m.c1316 = Constraint(expr=m.x48*m.x2514 + m.x673*m.x2520 + m.x1298*m.x2526 + m.x1923*m.x2532 <= 8)
m.c1317 = Constraint(expr=m.x49*m.x2514 + m.x674*m.x2520 + m.x1299*m.x2526 + m.x1924*m.x2532 <= 8)
m.c1318 = Constraint(expr=m.x50*m.x2514 + m.x675*m.x2520 + m.x1300*m.x2526 + m.x1925*m.x2532 <= 8)
m.c1319 = Constraint(expr=m.x51*m.x2514 + m.x676*m.x2520 + m.x1301*m.x2526 + m.x1926*m.x2532 <= 8)
m.c1320 = Constraint(expr=m.x52*m.x2514 + m.x677*m.x2520 + m.x1302*m.x2526 + m.x1927*m.x2532 <= 8)
m.c1321 = Constraint(expr=m.x53*m.x2514 + m.x678*m.x2520 + m.x1303*m.x2526 + m.x1928*m.x2532 <= 8)
m.c1322 = Constraint(expr=m.x54*m.x2514 + m.x679*m.x2520 + m.x1304*m.x2526 + m.x1929*m.x2532 <= 8)
m.c1323 = Constraint(expr=m.x55*m.x2514 + m.x680*m.x2520 + m.x1305*m.x2526 + m.x1930*m.x2532 <= 8)
m.c1324 = Constraint(expr=m.x56*m.x2514 + m.x681*m.x2520 + m.x1306*m.x2526 + m.x1931*m.x2532 <= 8)
m.c1325 = Constraint(expr=m.x57*m.x2514 + m.x682*m.x2520 + m.x1307*m.x2526 + m.x1932*m.x2532 <= 8)
m.c1326 = Constraint(expr=m.x58*m.x2514 + m.x683*m.x2520 + m.x1308*m.x2526 + m.x1933*m.x2532 <= 8)
m.c1327 = Constraint(expr=m.x59*m.x2514 + m.x684*m.x2520 + m.x1309*m.x2526 + m.x1934*m.x2532 <= 8)
m.c1328 = Constraint(expr=m.x60*m.x2514 + m.x685*m.x2520 + m.x1310*m.x2526 + m.x1935*m.x2532 <= 8)
m.c1329 = Constraint(expr=m.x61*m.x2514 + m.x686*m.x2520 + m.x1311*m.x2526 + m.x1936*m.x2532 <= 8)
m.c1330 = Constraint(expr=m.x62*m.x2514 + m.x687*m.x2520 + m.x1312*m.x2526 + m.x1937*m.x2532 <= 8)
m.c1331 = Constraint(expr=m.x63*m.x2514 + m.x688*m.x2520 + m.x1313*m.x2526 + m.x1938*m.x2532 <= 8)
m.c1332 = Constraint(expr=m.x64*m.x2514 + m.x689*m.x2520 + m.x1314*m.x2526 + m.x1939*m.x2532 <= 8)
m.c1333 = Constraint(expr=m.x65*m.x2514 + m.x690*m.x2520 + m.x1315*m.x2526 + m.x1940*m.x2532 <= 8)
m.c1334 = Constraint(expr=m.x66*m.x2514 + m.x691*m.x2520 + m.x1316*m.x2526 + m.x1941*m.x2532 <= 8)
m.c1335 = Constraint(expr=m.x67*m.x2514 + m.x692*m.x2520 + m.x1317*m.x2526 + m.x1942*m.x2532 <= 8)
m.c1336 = Constraint(expr=m.x68*m.x2514 + m.x693*m.x2520 + m.x1318*m.x2526 + m.x1943*m.x2532 <= 8)
m.c1337 = Constraint(expr=m.x69*m.x2514 + m.x694*m.x2520 + m.x1319*m.x2526 + m.x1944*m.x2532 <= 8)
m.c1338 = Constraint(expr=m.x70*m.x2514 + m.x695*m.x2520 + m.x1320*m.x2526 + m.x1945*m.x2532 <= 8)
m.c1339 = Constraint(expr=m.x71*m.x2514 + m.x696*m.x2520 + m.x1321*m.x2526 + m.x1946*m.x2532 <= 8)
m.c1340 = Constraint(expr=m.x72*m.x2514 + m.x697*m.x2520 + m.x1322*m.x2526 + m.x1947*m.x2532 <= 8)
m.c1341 = Constraint(expr=m.x73*m.x2514 + m.x698*m.x2520 + m.x1323*m.x2526 + m.x1948*m.x2532 <= 8)
m.c1342 = Constraint(expr=m.x74*m.x2514 + m.x699*m.x2520 + m.x1324*m.x2526 + m.x1949*m.x2532 <= 8)
m.c1343 = Constraint(expr=m.x75*m.x2514 + m.x700*m.x2520 + m.x1325*m.x2526 + m.x1950*m.x2532 <= 8)
m.c1344 = Constraint(expr=m.x76*m.x2514 + m.x701*m.x2520 + m.x1326*m.x2526 + m.x1951*m.x2532 <= 8)
m.c1345 = Constraint(expr=m.x77*m.x2514 + m.x702*m.x2520 + m.x1327*m.x2526 + m.x1952*m.x2532 <= 8)
m.c1346 = Constraint(expr=m.x78*m.x2514 + m.x703*m.x2520 + m.x1328*m.x2526 + m.x1953*m.x2532 <= 8)
m.c1347 = Constraint(expr=m.x79*m.x2514 + m.x704*m.x2520 + m.x1329*m.x2526 + m.x1954*m.x2532 <= 8)
m.c1348 = Constraint(expr=m.x80*m.x2514 + m.x705*m.x2520 + m.x1330*m.x2526 + m.x1955*m.x2532 <= 8)
m.c1349 = Constraint(expr=m.x81*m.x2514 + m.x706*m.x2520 + m.x1331*m.x2526 + m.x1956*m.x2532 <= 8)
m.c1350 = Constraint(expr=m.x82*m.x2514 + m.x707*m.x2520 + m.x1332*m.x2526 + m.x1957*m.x2532 <= 8)
m.c1351 = Constraint(expr=m.x83*m.x2514 + m.x708*m.x2520 + m.x1333*m.x2526 + m.x1958*m.x2532 <= 8)
m.c1352 = Constraint(expr=m.x84*m.x2514 + m.x709*m.x2520 + m.x1334*m.x2526 + m.x1959*m.x2532 <= 8)
m.c1353 = Constraint(expr=m.x85*m.x2514 + m.x710*m.x2520 + m.x1335*m.x2526 + m.x1960*m.x2532 <= 8)
m.c1354 = Constraint(expr=m.x86*m.x2514 + m.x711*m.x2520 + m.x1336*m.x2526 + m.x1961*m.x2532 <= 8)
m.c1355 = Constraint(expr=m.x87*m.x2514 + m.x712*m.x2520 + m.x1337*m.x2526 + m.x1962*m.x2532 <= 8)
m.c1356 = Constraint(expr=m.x88*m.x2514 + m.x713*m.x2520 + m.x1338*m.x2526 + m.x1963*m.x2532 <= 8)
m.c1357 = Constraint(expr=m.x89*m.x2514 + m.x714*m.x2520 + m.x1339*m.x2526 + m.x1964*m.x2532 <= 8)
m.c1358 = Constraint(expr=m.x90*m.x2514 + m.x715*m.x2520 + m.x1340*m.x2526 + m.x1965*m.x2532 <= 8)
m.c1359 = Constraint(expr=m.x91*m.x2514 + m.x716*m.x2520 + m.x1341*m.x2526 + m.x1966*m.x2532 <= 8)
m.c1360 = Constraint(expr=m.x92*m.x2514 + m.x717*m.x2520 + m.x1342*m.x2526 + m.x1967*m.x2532 <= 8)
m.c1361 = Constraint(expr=m.x93*m.x2514 + m.x718*m.x2520 + m.x1343*m.x2526 + m.x1968*m.x2532 <= 8)
m.c1362 = Constraint(expr=m.x94*m.x2514 + m.x719*m.x2520 + m.x1344*m.x2526 + m.x1969*m.x2532 <= 8)
m.c1363 = Constraint(expr=m.x95*m.x2514 + m.x720*m.x2520 + m.x1345*m.x2526 + m.x1970*m.x2532 <= 8)
m.c1364 = Constraint(expr=m.x96*m.x2514 + m.x721*m.x2520 + m.x1346*m.x2526 + m.x1971*m.x2532 <= 8)
m.c1365 = Constraint(expr=m.x97*m.x2514 + m.x722*m.x2520 + m.x1347*m.x2526 + m.x1972*m.x2532 <= 8)
m.c1366 = Constraint(expr=m.x98*m.x2514 + m.x723*m.x2520 + m.x1348*m.x2526 + m.x1973*m.x2532 <= 8)
m.c1367 = Constraint(expr=m.x99*m.x2514 + m.x724*m.x2520 + m.x1349*m.x2526 + m.x1974*m.x2532 <= 8)
m.c1368 = Constraint(expr=m.x100*m.x2514 + m.x725*m.x2520 + m.x1350*m.x2526 + m.x1975*m.x2532 <= 8)
m.c1369 = Constraint(expr=m.x101*m.x2514 + m.x726*m.x2520 + m.x1351*m.x2526 + m.x1976*m.x2532 <= 8)
m.c1370 = Constraint(expr=m.x102*m.x2514 + m.x727*m.x2520 + m.x1352*m.x2526 + m.x1977*m.x2532 <= 8)
m.c1371 = Constraint(expr=m.x103*m.x2514 + m.x728*m.x2520 + m.x1353*m.x2526 + m.x1978*m.x2532 <= 8)
m.c1372 = Constraint(expr=m.x104*m.x2514 + m.x729*m.x2520 + m.x1354*m.x2526 + m.x1979*m.x2532 <= 8)
m.c1373 = Constraint(expr=m.x105*m.x2514 + m.x730*m.x2520 + m.x1355*m.x2526 + m.x1980*m.x2532 <= 8)
m.c1374 = Constraint(expr=m.x106*m.x2514 + m.x731*m.x2520 + m.x1356*m.x2526 + m.x1981*m.x2532 <= 8)
m.c1375 = Constraint(expr=m.x107*m.x2514 + m.x732*m.x2520 + m.x1357*m.x2526 + m.x1982*m.x2532 <= 8)
m.c1376 = Constraint(expr=m.x108*m.x2514 + m.x733*m.x2520 + m.x1358*m.x2526 + m.x1983*m.x2532 <= 8)
m.c1377 = Constraint(expr=m.x109*m.x2514 + m.x734*m.x2520 + m.x1359*m.x2526 + m.x1984*m.x2532 <= 8)
m.c1378 = Constraint(expr=m.x110*m.x2514 + m.x735*m.x2520 + m.x1360*m.x2526 + m.x1985*m.x2532 <= 8)
m.c1379 = Constraint(expr=m.x111*m.x2514 + m.x736*m.x2520 + m.x1361*m.x2526 + m.x1986*m.x2532 <= 8)
m.c1380 = Constraint(expr=m.x112*m.x2514 + m.x737*m.x2520 + m.x1362*m.x2526 + m.x1987*m.x2532 <= 8)
m.c1381 = Constraint(expr=m.x113*m.x2514 + m.x738*m.x2520 + m.x1363*m.x2526 + m.x1988*m.x2532 <= 8)
m.c1382 = Constraint(expr=m.x114*m.x2514 + m.x739*m.x2520 + m.x1364*m.x2526 + m.x1989*m.x2532 <= 8)
m.c1383 = Constraint(expr=m.x115*m.x2514 + m.x740*m.x2520 + m.x1365*m.x2526 + m.x1990*m.x2532 <= 8)
m.c1384 = Constraint(expr=m.x116*m.x2514 + m.x741*m.x2520 + m.x1366*m.x2526 + m.x1991*m.x2532 <= 8)
m.c1385 = Constraint(expr=m.x117*m.x2514 + m.x742*m.x2520 + m.x1367*m.x2526 + m.x1992*m.x2532 <= 8)
m.c1386 = Constraint(expr=m.x118*m.x2514 + m.x743*m.x2520 + m.x1368*m.x2526 + m.x1993*m.x2532 <= 8)
m.c1387 = Constraint(expr=m.x119*m.x2514 + m.x744*m.x2520 + m.x1369*m.x2526 + m.x1994*m.x2532 <= 8)
m.c1388 = Constraint(expr=m.x120*m.x2514 + m.x745*m.x2520 + m.x1370*m.x2526 + m.x1995*m.x2532 <= 8)
m.c1389 = Constraint(expr=m.x121*m.x2514 + m.x746*m.x2520 + m.x1371*m.x2526 + m.x1996*m.x2532 <= 8)
m.c1390 = Constraint(expr=m.x122*m.x2514 + m.x747*m.x2520 + m.x1372*m.x2526 + m.x1997*m.x2532 <= 8)
m.c1391 = Constraint(expr=m.x123*m.x2514 + m.x748*m.x2520 + m.x1373*m.x2526 + m.x1998*m.x2532 <= 8)
m.c1392 = Constraint(expr=m.x124*m.x2514 + m.x749*m.x2520 + m.x1374*m.x2526 + m.x1999*m.x2532 <= 8)
m.c1393 = Constraint(expr=m.x125*m.x2514 + m.x750*m.x2520 + m.x1375*m.x2526 + m.x2000*m.x2532 <= 8)
m.c1394 = Constraint(expr=m.x126*m.x2514 + m.x751*m.x2520 + m.x1376*m.x2526 + m.x2001*m.x2532 <= 8)
m.c1395 = Constraint(expr=m.x127*m.x2514 + m.x752*m.x2520 + m.x1377*m.x2526 + m.x2002*m.x2532 <= 8)
m.c1396 = Constraint(expr=m.x128*m.x2514 + m.x753*m.x2520 + m.x1378*m.x2526 + m.x2003*m.x2532 <= 8)
m.c1397 = Constraint(expr=m.x129*m.x2514 + m.x754*m.x2520 + m.x1379*m.x2526 + m.x2004*m.x2532 <= 8)
m.c1398 = Constraint(expr=m.x130*m.x2514 + m.x755*m.x2520 + m.x1380*m.x2526 + m.x2005*m.x2532 <= 8)
m.c1399 = Constraint(expr=m.x131*m.x2514 + m.x756*m.x2520 + m.x1381*m.x2526 + m.x2006*m.x2532 <= 8)
m.c1400 = Constraint(expr=m.x132*m.x2514 + m.x757*m.x2520 + m.x1382*m.x2526 + m.x2007*m.x2532 <= 8)
m.c1401 = Constraint(expr=m.x133*m.x2514 + m.x758*m.x2520 + m.x1383*m.x2526 + m.x2008*m.x2532 <= 8)
m.c1402 = Constraint(expr=m.x134*m.x2514 + m.x759*m.x2520 + m.x1384*m.x2526 + m.x2009*m.x2532 <= 8)
m.c1403 = Constraint(expr=m.x135*m.x2514 + m.x760*m.x2520 + m.x1385*m.x2526 + m.x2010*m.x2532 <= 8)
m.c1404 = Constraint(expr=m.x136*m.x2514 + m.x761*m.x2520 + m.x1386*m.x2526 + m.x2011*m.x2532 <= 8)
m.c1405 = Constraint(expr=m.x137*m.x2514 + m.x762*m.x2520 + m.x1387*m.x2526 + m.x2012*m.x2532 <= 8)
m.c1406 = Constraint(expr=m.x138*m.x2514 + m.x763*m.x2520 + m.x1388*m.x2526 + m.x2013*m.x2532 <= 8)
m.c1407 = Constraint(expr=m.x139*m.x2514 + m.x764*m.x2520 + m.x1389*m.x2526 + m.x2014*m.x2532 <= 8)
m.c1408 = Constraint(expr=m.x140*m.x2514 + m.x765*m.x2520 + m.x1390*m.x2526 + m.x2015*m.x2532 <= 8)
m.c1409 = Constraint(expr=m.x141*m.x2514 + m.x766*m.x2520 + m.x1391*m.x2526 + m.x2016*m.x2532 <= 8)
m.c1410 = Constraint(expr=m.x142*m.x2514 + m.x767*m.x2520 + m.x1392*m.x2526 + m.x2017*m.x2532 <= 8)
m.c1411 = Constraint(expr=m.x143*m.x2514 + m.x768*m.x2520 + m.x1393*m.x2526 + m.x2018*m.x2532 <= 8)
m.c1412 = Constraint(expr=m.x144*m.x2514 + m.x769*m.x2520 + m.x1394*m.x2526 + m.x2019*m.x2532 <= 8)
m.c1413 = Constraint(expr=m.x145*m.x2514 + m.x770*m.x2520 + m.x1395*m.x2526 + m.x2020*m.x2532 <= 8)
m.c1414 = Constraint(expr=m.x146*m.x2514 + m.x771*m.x2520 + m.x1396*m.x2526 + m.x2021*m.x2532 <= 8)
m.c1415 = Constraint(expr=m.x147*m.x2514 + m.x772*m.x2520 + m.x1397*m.x2526 + m.x2022*m.x2532 <= 8)
m.c1416 = Constraint(expr=m.x148*m.x2514 + m.x773*m.x2520 + m.x1398*m.x2526 + m.x2023*m.x2532 <= 8)
m.c1417 = Constraint(expr=m.x149*m.x2514 + m.x774*m.x2520 + m.x1399*m.x2526 + m.x2024*m.x2532 <= 8)
m.c1418 = Constraint(expr=m.x150*m.x2514 + m.x775*m.x2520 + m.x1400*m.x2526 + m.x2025*m.x2532 <= 8)
m.c1419 = Constraint(expr=m.x151*m.x2514 + m.x776*m.x2520 + m.x1401*m.x2526 + m.x2026*m.x2532 <= 8)
m.c1420 = Constraint(expr=m.x152*m.x2514 + m.x777*m.x2520 + m.x1402*m.x2526 + m.x2027*m.x2532 <= 8)
m.c1421 = Constraint(expr=m.x153*m.x2514 + m.x778*m.x2520 + m.x1403*m.x2526 + m.x2028*m.x2532 <= 8)
m.c1422 = Constraint(expr=m.x154*m.x2514 + m.x779*m.x2520 + m.x1404*m.x2526 + m.x2029*m.x2532 <= 8)
m.c1423 = Constraint(expr=m.x155*m.x2514 + m.x780*m.x2520 + m.x1405*m.x2526 + m.x2030*m.x2532 <= 8)
m.c1424 = | |
0.1 or endBall.yPosition > maxFieldCorner + 0.1):
emit = True
else:
# Check lower left corner
if (endBall.xPosition < 0 and endBall.xPosition > -2 and endBall.yPosition > -2 and endBall.yPosition < 0):
emit = False
# Check upper left corner
elif (endBall.xPosition < 0 and endBall.xPosition > -2 and endBall.yPosition < maxFieldCorner+2 and endBall.yPosition > maxFieldCorner):
emit = False
# Check lower right corner
elif (endBall.xPosition > maxFieldLine and endBall.xPosition < maxFieldLine+2 and endBall.yPosition > -2 and endBall.yPosition < 0):
emit = False
# Check upper right corner
elif (endBall.xPosition > maxFieldLine and endBall.xPosition < maxFieldLine+2 and endBall.yPosition < maxFieldCorner+2 and endBall.yPosition > maxFieldCorner):
emit = False
# the ball is off the pitch but level with the goals (behind the goal mouth)
elif (startBall.xPosition < 0 or startBall.xPosition > maxFieldLine) and (startBall.yPosition > minGoalLimit and startBall.yPosition < maxGoalLimit):
if (endBall.xPosition <-5 or endBall.xPosition > maxFieldLine +5):
emit = True
if emit == True:
ballOutFrame = int(startBall.timestamp + self.windowSize)
# print("Emitting BallOut at frame", str(ballOutFrame))
# Check that the BallOut is not after a special event
# that is, an offside, a foul, or a penalty
# For each list of events
for lst in self.specialEvents:
for event in lst:
#frame BallOut -> 28951
#frame Penalty -> 28928
# Check that the BallOut moment is after "event"
# within a certain range (the observation range)
frame = event[0]
#print("Checking Offside... Frame", str(frame))
# The current frame (index) comes after the special-event moment
if ballOutFrame >= frame:
# print("Offside at frame", str(frame), "\tBallOut at frame", str(ballOutFrame))
if (ballOutFrame - frame) <= self.windowSize + 100:
print("Canceling BallOut at frame", str(ballOutFrame))
emit = False
index += self.stride
return [None, int(index)]
output = self._setEvent(startBall, index, startBall)
index += self.stride
return [output, int(index)]
else:
return [None, int(index)]
def _fieldLimit(self):
"""
Extracts the end-line (goal-line) positions; to be modified if a different reference system is adopted
"""
return [0, self.xDimension]
def _fieldCorner(self):
"""
Extracts the sideline positions of the field; to be modified if a different reference system is adopted
"""
return [0, self.yDimension]
def _goalLimit(self):
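# The goal mouth is modelled as 7.5 units wide, centred on the middle of the y axis
# (from yDimension / 2 - 3.75 to yDimension / 2 + 3.75).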
return [self.yDimension/2 - 3.75, 3.75 + self.yDimension/2]
def _setEvent(self,data,index,player):
# From this point on, "player" refers to the ball
return [{'event': 'BallOut',
'timestamp': player.timestamp + self.windowSize}]
#'x': player.xPosition,
#'y': player.yPosition}
def _initialize(self, list):
self.windowSize = list[0]
class GoalEvent(GenericEvent):
"Class that defines the event detector for the goal"
def __init__(self, file, xDimension, yDimension):
"""
:param file: File containing a list of initialization parameters for the module
"""
self.name = "Goal"
self.version = "1.0"
if not(file is None):
self._readFromFile(file)
#print('Event: {}\nVersion: {}\nWindow: {}\n'.format(self.name, self.version, self.windowSize))
self.xDimension = xDimension
self.yDimension = yDimension
def recognize(self, data, index):
"""
Function that identifies whether the ball has entered the goal area
: param data: Compact dataset with positional data and features
: param index: Index from which the window starts
: return: A dictionary with the recognized event, None in case nothing has been recognized
"""
minFieldLine, maxFieldLine = self._fieldLimit()
minGoalLimit, maxGoalLimit = self._goalLimit()
startBall = data[index].get('ball')
for i in range(int(self.windowSize)) :
if (index + i) >= len(data) :
return [None, index]
ball = data[index + i].get('ball')
if (minFieldLine < ball.xPosition) and (ball.xPosition < maxFieldLine):
return [None, index]
elif (ball.yPosition < minGoalLimit - 1) or (ball.yPosition > maxGoalLimit + 1):
return [None, index]
elif (ball.xPosition < minFieldLine - 5) or (ball.xPosition > maxFieldLine + 5):
return [None, index]
i += 1
output = self._setEvent(startBall, index, startBall)
if output != None:
index += i
return [output, int(index)]
def _fieldLimit(self):
"""
Extracts the end-line (goal-line) positions; to be modified if a different reference system is adopted
"""
return [0, self.xDimension]
def _goalLimit(self):
return [self.yDimension/2 - 3.75, 3.75 + self.yDimension/2]
def _setEvent(self,data,index,player):
return [{'event': 'Goal',
'timestamp': player.timestamp + math.floor(self.windowSize/2)}]
#'x': player.xPosition,
#'y': player.yPosition}
def _initialize(self, list):
self.windowSize = list[0]
# DATA STORING
class XML():
@staticmethod
def createEventXML(eventList, filename):
"""
Create an XML file from a list of dictionaries containing events
: param eventList: List of event dictionaries from which to create the XML file
: param filename: String with the path to the XML file
: Return:
"""
id = 0
annotations = ET.Element('annotations')
for event in eventList:
track = ET.SubElement(annotations, 'track')
track.attrib['id'] = str(int(id))
track.attrib['label'] = event['event']
box = ET.SubElement(track, 'box')
box.attrib['frame'] = str(int(event['timestamp']))
box.attrib['keyframe'] = '1'
box.attrib['occluded'] = '0'
box.attrib['outside'] = '0'
box.attrib['xbr'] = '10'
box.attrib['xtl'] = '100'
box.attrib['ybr'] = '10'
box.attrib['ytl'] = '100'
for attributeName in event:
if ((attributeName != 'event') and (attributeName != 'timestamp')):
attribute = ET.SubElement(box, 'attribute')
attribute.attrib['name'] = attributeName
attribute.text = str(event[attributeName])
id = id + 1
#tree = ET.ElementTree(annotations)
xml = DOM.parseString(ET.tostring(annotations))
with open(filename, 'w') as f:
f.write(xml.toprettyxml())
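# Usage sketch (the event values are made up for illustration):
#   XML.createEventXML([{'event': 'Goal', 'timestamp': 1200}], 'events.xml')
# writes an <annotations> root holding one <track label="Goal"> with a <box frame="1200">.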
@staticmethod
def getHalfTimeFrame(filename):
"Extract second half start frame from Annotations_Atomic_Manual.xml"
result = 54000000
tree = ET.parse(filename)
annotations = tree.getroot()
if (annotations.tag != 'annotations'):
raise Exception('No /annotations found in XML')
else:
for track in annotations:
if (track.tag == 'track'):
if (track.attrib.get('id') is None):
raise Exception('No attribute "id" in /track')
elif (int(track.attrib['id']) == 0):
if (track.attrib.get('label') is None):
raise Exception('No attribute "label" in /track')
elif(track.attrib['label'] != 'SecondHalf'):
raise Exception('This track has an id = 0 but is not a second half!')
else:
box = track[0]
if (box.tag != 'box'):
raise Exception('No /box found in /track')
else:
if (box.attrib.get('frame') is None):
raise Exception('No attribute "frame" in /box')
else:
result = int(box.attrib['frame'])
return result
@staticmethod
def getGoalkeepersId(filename):
results = []
tree = ET.parse(filename)
annotations = tree.getroot()
if (annotations.tag != 'annotations'):
raise Exception('No /annotations found in XML')
else:
for track in annotations:
if (track.tag == 'track'):
if (track.attrib.get('id') is None):
raise Exception('No attribute "id" in /track')
elif (int(track.attrib['id']) == -1):
if (track.attrib.get('label') is None):
raise Exception('No attribute "label" in /track')
elif(track.attrib['label'] != 'GoalKeeper'):
raise Exception('This track does not contain goalkeepers!')
else:
box = track[0]
if (box.tag != 'box'):
raise Exception('No /box found in /track')
else:
for attribute in box:
if (attribute.attrib['name'] == 'goalKeeperId'):
results.append(attribute.text)
return results[0], int(results[1]) + PLAYERVALUE
@staticmethod
def getSpecialEvents(filename):
"Extract frames for goals, fouls and punishments from Annotations_Atomic_Manual.xml"
offsides = []
fouls = []
penalties = []
tree = ET.parse(filename)
annotations = tree.getroot()
if (annotations.tag != 'annotations'):
raise Exception('No /annotations found in XML')
else:
for track in annotations:
if (track.tag == 'track'):
if (track.attrib.get('id') is None):
raise Exception('No attribute "id" in /track')
elif (int(track.attrib['id']) > 0):
if (track.attrib.get('label') is None):
raise Exception('No attribute "label" in /track')
else:
actualList = None
if(track.attrib['label'] == 'Offside'):
actualList = offsides
elif(track.attrib['label'] == 'Penalty'):
actualList = penalties
elif(track.attrib['label'] == 'Foul'):
actualList = fouls
else:
continue
box = track[0]
if (box.tag != 'box'):
raise Exception('No /box found in /track')
else:
if (box.attrib.get('frame') is None):
raise Exception('No attribute "frame" in /box')
else:
if (len(box) <= 0):
raise Exception('No sub attributes in /box')
currentEvent = [int(box.attrib['frame'])]
for attribute in box:
if (attribute.tag != 'attribute') or (attribute.attrib.get('name') is None):
raise Exception('Malformed sub attributes in /box')
else:
currentEvent.append(attribute.text)
actualList.append(currentEvent)
return [offsides, fouls, penalties]
@staticmethod
def getRefereeEvents(filename):
"Estrae i frame per goal, falli e punizioni da Annotations_Atomic_Manual.xml"
goals = []
fouls = []
penalties = []
tree = ET.parse(filename)
annotations = tree.getroot()
if (annotations.tag != 'annotations'):
raise Exception('No /annotations found in XML')
else:
for track in annotations:
if (track.tag == 'track'):
if (track.attrib.get('id') is None):
raise Exception('No attribute "id" in /track')
elif (int(track.attrib['id']) > 0):
if (track.attrib.get('label') is None):
raise Exception('No attribute "label" in /track')
else:
actualList = None
if(track.attrib['label'] == 'Goal'):
actualList = goals
elif(track.attrib['label'] == 'Penalty'):
actualList = penalties
elif(track.attrib['label'] == 'Foul'):
actualList = fouls
else:
continue
box = track[0]
if (box.tag != 'box'):
raise Exception('No /box found in /track')
else:
if (box.attrib.get('frame') is None):
raise Exception('No attribute "frame" in /box')
else:
if (len(box) <= 0):
raise Exception('No sub attributes in /box')
currentEvent = [int(box.attrib['frame'])]
for attribute in box:
if (attribute.tag != 'attribute') or (attribute.attrib.get('name') is None):
raise Exception('Malformed sub attributes in /box')
else:
currentEvent.append(attribute.text)
actualList.append(currentEvent)
return [goals, fouls, penalties]
#detector
class Detector():
"Class that performs the | |
933, 899, 736, 195, 147,
366, 181, 973, 59, 873, 379],
[161, 879, 580, 471, 865, 871, 542, 206, 816,
807, 436, 387, 893, 970, 145],
[367, 686, 933, 883, 434, 565, 652, 528, 199,
346, 378, 377, 911, 746, 288],
[844, 843, 849, 486, 101, 957, 940, 223, 930,
113, 359, 782, 652, 783, 559],
[56, 652, 242, 424, 531, 187, 16, 752, 168, 603,
702, 435, 237, 814, 398],
[10, 230, 815, 547, 434, 244, 56, 947, 758, 155,
407, 213, 366, 418, 518],
[438, 950, 214, 575, 809, 811, 370, 916, 57,
964, 918, 461, 428, 971, 456],
[190, 751, 7, 549, 101, 648, 636, 735, 371, 122,
316, 848, 463, 552, 41],
[82, 332, 595, 889, 290, 652, 211, 874, 249,
740, 352, 870, 517, 810, 422],
[248, 681, 64, 600, 6, 399, 108, 991, 123, 413,
862, 309, 28, 957, 861],
[603, 104, 908, 12, 827, 54, 796, 166, 701, 933,
180, 308, 604, 374, 950],
[495, 877, 743, 460, 546, 160, 966, 712, 708,
606, 52, 445, 957, 762, 950],
[39, 185, 527, 228, 972, 273, 584, 336, 352,
376, 681, 554, 34, 322, 125]]),
[686, 345, 940, 678, 562, 159, 206, 990, 927, 298, 539,
662, 265, 951, 400, 720, 379, 145, 288, 559, 398, 518,
456, 41, 422, 861, 950, 950, 125, 322, 34, 554, 681,
376, 352, 336, 584, 273, 972, 228, 527, 185, 39, 495,
603, 248, 82, 190, 438, 10, 56, 844, 367, 161, 116,
528, 940, 929, 898, 770, 67, 357, 491, 61, 867, 425,
746, 957, 613, 873, 970, 746, 783, 814, 418, 971, 552,
810, 957, 374, 762, 957, 445, 52, 606, 708, 712, 966,
160, 546, 460, 743, 877, 104, 681, 332, 751, 950, 230,
652, 843, 686, 879, 943, 331, 211, 933, 899, 736, 195,
147, 366, 181, 973, 59, 893, 911, 652, 237, 366, 428,
463, 517, 28, 604, 308, 180, 933, 701, 166, 796, 54,
827, 12, 908, 64, 595, 7, 214, 815, 242, 849, 933,
580, 471, 865, 871, 542, 206, 816, 807, 436, 387, 377,
782, 435, 213, 461, 848, 870, 309, 862, 413, 123, 991,
108, 399, 6, 600, 889, 549, 575, 547, 424, 486, 883,
434, 565, 652, 528, 199, 346, 378, 359, 702, 407, 918,
316, 352, 740, 249, 874, 211, 652, 290, 101, 809, 434,
531, 101, 957, 940, 223, 930, 113, 603, 155, 964, 122,
371, 735, 636, 648, 811, 244, 187, 16, 752, 168, 758,
57, 916, 370, 56, 947])
def test_snail_040(self):
self.assertEqual(snail([[234, 459, 8, 740, 18, 612, 971, 482, 105, 70],
[725, 582, 552, 166, 909, 83, 323, 842, 901,
479],
[139, 880, 685, 560, 197, 820, 458, 261, 491,
930],
[917, 677, 674, 610, 470, 744, 893, 604, 310,
818],
[826, 470, 627, 391, 222, 544, 687, 939, 544,
952],
[68, 614, 803, 517, 852, 251, 87, 88, 838, 229],
[269, 848, 520, 498, 486, 567, 575, 779, 706,
74],
[567, 438, 209, 639, 573, 640, 885, 830, 665,
130],
[183, 483, 877, 703, 75, 515, 323, 482, 901,
562],
[426, 570, 572, 144, 924, 285, 48, 976, 282,
802]]),
[234, 459, 8, 740, 18, 612, 971, 482, 105, 70, 479,
930, 818, 952, 229, 74, 130, 562, 802, 282, 976, 48,
285, 924, 144, 572, 570, 426, 183, 567, 269, 68, 826,
917, 139, 725, 582, 552, 166, 909, 83, 323, 842, 901,
491, 310, 544, 838, 706, 665, 901, 482, 323, 515, 75,
703, 877, 483, 438, 848, 614, 470, 677, 880, 685, 560,
197, 820, 458, 261, 604, 939, 88, 779, 830, 885, 640,
573, 639, 209, 520, 803, 627, 674, 610, 470, 744, 893,
687, 87, 575, 567, 486, 498, 517, 391, 222, 544, 251,
852])
def test_snail_041(self):
self.assertEqual(snail(
[[966, 770, 415, 443, 591], [733, 239, 637, 938, 246],
[567, 292, 816, 631, 702], [315, 312, 771, 408, 474],
[275, 740, 146, 719, 961]]),
[966, 770, 415, 443, 591, 246, 702, 474, 961, 719, 146,
740, 275, 315, 567, 733, 239, 637, 938, 631, 408, 771,
312, 292, 816])
def test_snail_042(self):
self.assertEqual(snail([[928, 128, 90, 593, 147, 757, 325, 206, 400,
949, 633, 558, 879],
[190, 99, 708, 968, 665, 847, 159, 388, 584,
547, 469, 788, 586],
[684, 65, 832, 834, 651, 891, 458, 712, 596,
377, 465, 789, 44],
[653, 136, 125, 990, 21, 351, 405, 771, 910,
922, 213, 998, 75],
[165, 220, 334, 367, 603, 930, 821, 232, 624,
209, 353, 156, 271],
[437, 145, 802, 747, 716, 565, 784, 364, 524,
475, 283, 81, 501],
[821, 590, 652, 948, 704, 922, 334, 102, 905,
13, 335, 462, 425],
[118, 633, 924, 637, 123, 245, 432, 807, 579,
480, 828, 79, 942],
[805, 592, 718, 356, 790, 549, 125, 844, 691,
71, 835, 150, 747],
[87, 541, 24, 922, 952, 881, 463, 192, 319, 765,
771, 368, 432],
[149, 859, 949, 368, 342, 942, 337, 598, 490,
889, 50, 794, 786],
[868, 167, 392, 93, 126, 521, 922, 941, 210,
170, 982, 94, 43],
[583, 931, 24, 750, 990, 453, 518, 9, 657, 789,
678, 676, 756]]),
[928, 128, 90, 593, 147, 757, 325, 206, 400, 949, 633,
558, 879, 586, 44, 75, 271, 501, 425, 942, 747, 432,
786, 43, 756, 676, 678, 789, 657, 9, 518, 453, 990,
750, 24, 931, 583, 868, 149, 87, 805, 118, 821, 437,
165, 653, 684, 190, 99, 708, 968, 665, 847, 159, 388,
584, 547, 469, 788, 789, 998, 156, 81, 462, 79, 150,
368, 794, 94, 982, 170, 210, 941, 922, 521, 126, 93,
392, 167, 859, 541, 592, 633, 590, 145, 220, 136, 65,
832, 834, 651, 891, 458, 712, 596, 377, 465, 213, 353,
283, 335, 828, 835, 771, 50, 889, 490, 598, 337, 942,
342, 368, 949, 24, 718, 924, 652, 802, 334, 125, 990,
21, 351, 405, 771, 910, 922, 209, 475, 13, 480, 71,
765, 319, 192, 463, 881, 952, 922, 356, 637, 948, 747,
367, 603, 930, 821, 232, 624, 524, 905, 579, 691, 844,
125, 549, 790, 123, 704, 716, 565, 784, 364, 102, 807,
432, 245, 922, 334])
def test_snail_043(self):
self.assertEqual(snail(
[[87, 462, 110, 33, 41, 613, 234, 971, 424, 490, 399],
[489, 50, 350, 304, 182, 24, 614, 707, 935, 678, 706],
[363, 94, 140, 854, 757, 467, 369, 903, 629, 342, 144],
[838, 301, 145, 18, 841, 484, 374, 723, 136, 333, 757],
[316, 713, 514, 19, 847, 337, 830, 358, 313, 138, 270],
[869, 803, 76, 126, 424, 80, 383, 117, 180, 519, 534],
[663, 709, 774, 866, 180, 59, 780, 653, 290, 958, 920],
[931, 926, 174, 65, 301, 51, 255, 19, 439, 910, 474],
[229, 84, 159, 158, 470, 597, 842, 83, 794, 285, 20],
[248, 938, 591, 246, 529, 506, 869, 146, 600, 738, 931],
[391, 267, 55, 182, 281, 344, 431, 338, 792, 443, 687]]),
[87, 462, 110, 33, 41, 613, 234, 971, 424, 490, 399,
706, 144, 757, 270, 534, 920, 474, 20, 931, 687, 443,
792, 338, 431, 344, 281, 182, 55, 267, 391, 248, 229,
931, 663, 869, 316, 838, 363, 489, 50, 350, 304, 182,
24, 614, 707, 935, 678, 342, 333, 138, 519, 958, 910,
285, 738, 600, 146, 869, 506, 529, 246, 591, 938, 84,
926, 709, 803, 713, 301, 94, 140, 854, 757, 467, 369,
903, 629, 136, 313, 180, 290, 439, 794, 83, 842, 597,
470, 158, 159, 174, 774, 76, 514, 145, 18, 841, 484,
374, 723, 358, 117, 653, 19, 255, 51, 301, 65, 866,
126, 19, 847, 337, 830, 383, 780, 59, 180, 424, 80])
def test_snail_044(self):
self.assertEqual(snail(
[[64, 644, 694, 5, 163, 760, 568, 84, 67, 517, 872],
[933, 412, 172, 162, 97, | |
A SPECIFIC VALUE.
THIS HAS IMMEDIATE EFFECT AND DOES NOT FADE THE SOUND.
AFFECTS ALL SOUNDS, WITH OR WITHOUT PANNING EFFECT.
THE PANNING EFFECT WILL BE PRESERVED AFTER ADJUSTING THE VOLUME.
:param volume_: float; volume value, default is 1.0
:return : None
"""
# RESET THE VOLUME TO 1.0 IF THE INPUT VALUE IS OUT OF RANGE
if not 0.0 <= volume_ <= 1.0:
volume_ = 1.0
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# WITH PANNING
if hasattr(single_obj, "pos") and single_obj.pos is not None:
if hasattr(channel, "set_volume"):
# Calculate the sound panning, left & right volume values
left, right = self.stereo_panning(single_obj.pos, self.screen_size.w)
left *= volume_
right *= volume_
channel.set_volume(left, right)
# WITHOUT PANNING
else:
if single_obj is not None:
if hasattr(single_obj.sound, "set_volume"):
single_obj.sound.set_volume(volume_)
i += 1
def pause_sound(self, name_: str = None, id_=None) -> None:
"""
PAUSE A SINGLE SOUND FROM THE MIXER (AT LEAST ONE SEARCH ELEMENT HAS TO BE PROVIDED NAME OR ID)
:param name_ : string | None; Given sound name (name given at the time of the SoundObject construction)
:param id_ : int | None; Default None. ID number such as object_id_ = id(sound_).
:return : None
"""
if name_ is None and id_ is None:
raise ValueError("\nInvalid function call, at least one argument must be set!")
# search by name takes precedence (if a name value is provided)
if name_ is not None:
id_ = None
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
if hasattr(channel, "pause"):
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# search by name
if name_ is not None:
if single_obj.name == name_:
channel.pause()
# search by id_
elif id_ is not None:
if single_obj.obj_id == id_:
channel.pause()
i += 1
...
def pause_sounds(self) -> None:
"""
PAUSE ALL SOUND OBJECTS (THIS HAS IMMEDIATE EFFECT)
:return : None
"""
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
if hasattr(channel, "pause"):
channel.pause()
i += 1
def unpause_sounds(self) -> None:
"""
UNPAUSE ALL SOUND OBJECTS (THIS HAS IMMEDIATE EFFECT)
:return : None
"""
objs = self.snd_obj
i = 0
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
if hasattr(channel, "unpause"):
channel.unpause()
i += 1
def unpause_sound(self, name_: str = None, id_=None) -> None:
"""
UNPAUSE A SINGLE SOUND FROM THE MIXER (AT LEAST ONE SEARCH ELEMENT HAS TO BE PROVIDED NAME OR ID)
:param name_ : string | None; Given sound name (name given at the time of the SoundObject construction)
:param id_ : int | None; Default None. ID number such as object_id_ = id(sound_).
:return : None
"""
if name_ is None and id_ is None:
raise ValueError("\nInvalid function call, at least one argument must be set!")
# search by name takes precedence (if a name value is provided)
if name_ is not None:
id_ = None
objs = self.snd_obj
i = 0
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# search by name
if name_ is not None:
if single_obj.name == name_:
channel.unpause()
# search by id_
elif id_ is not None:
if single_obj.obj_id == id_:
channel.unpause()
i += 1
def show_free_channels(self) -> list:
"""
RETURN A LIST OF FREE CHANNELS (NUMERICAL VALUES).
:return: list; list of free channel numbers
"""
free_channels = []
i = 0
free_channels_append = free_channels.append
start = self.start
for c in self.channels:
if not c.get_busy():
free_channels_append(i + start)
i += 1
print("Free channels : %s " % free_channels)
return free_channels
def show_sounds_playing(self):
"""
DISPLAY ALL SOUND OBJECTS
"""
j = 0
for object_ in self.snd_obj:
if object_:
timeleft = round(object_.length - (time() - object_.time), 2)
# if timeleft < 0, most likely to be a sound with attribute loop enabled
if timeleft < 0.0:
timeleft = 0.0
print('Name %s priority %s channel %s length(s) %s time left(s) %s' %
(object_.name, object_.priority, object_.active_channel, round(object_.length, 2),
timeleft))
j += 1
def get_identical_sounds(self, sound_: pygame.mixer.Sound) -> list:
"""
RETURN A LIST OF CHANNEL(S) PLAYING IDENTICAL SOUND OBJECT(s)
SEARCH BY IDENTICAL PYGAME.SOUND OBJECT
:param sound_ : Mixer object; Object to compare to
:return : python list; List containing channels number playing similar sound object,
if no match is found, return an empty list
"""
assert isinstance(sound_, pygame.mixer.Sound), \
"\nPositional argument sound_ must be a pygame.mixer.Sound type, got %s " % type(sound_)
duplicate = []
duplicate_append = duplicate.append
for obj in self.snd_obj:
if obj:
if obj.sound == sound_:
duplicate_append(obj.active_channel)
return duplicate
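# Usage sketch (variable names are hypothetical):
#   busy_channels = sound_server.get_identical_sounds(explosion_sound)
# returns the channel numbers currently mixing that exact pygame.mixer.Sound object.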
def get_identical_id(self, id_: int) -> list:
"""
RETURN A LIST CONTAINING ANY IDENTICAL SOUND BEING MIXED.
USE THE UNIQUE ID FOR REFERENCING OBJECTS
:param id_: python integer; unique id number that reference a sound object
:return : list; Return a list of channels containing identical sound object
"""
assert isinstance(id_, int), \
"\nPositional argument id_ must be an int type, got %s " % type(id_)
duplicate = []
duplicate_append = duplicate.append
for obj in self.snd_obj:
if obj:
if obj.obj_id == id_:
duplicate_append(obj)
return duplicate
def stop(self, stop_list_: list):
"""
STOP ALL SOUND BEING PLAYED ON THE GIVEN LIST OF CHANNELS.
ONLY SOUND WITH PRIORITY LEVEL 0 CAN BE STOPPED.
:param stop_list_: python list; list of channels
:return : None
"""
assert isinstance(stop_list_, list), \
"\nPositional argument stop_list must be a python list type, got %s " % type(stop_list_)
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in stop_list_:
l = c - start
if snd_obj[l]:
if snd_obj[l].priority == 0:
channels[l].set_volume(0.0, 0.0)
channels[l].stop()
self.update()
def stop_all_except(self, exception_: list):
"""
STOP ALL SOUND OBJECT EXCEPT SOUNDS FROM A GIVEN LIST OF ID(SOUND)
IT WILL STOP SOUND PLAYING ON ALL CHANNELS REGARDLESS
OF THEIR PRIORITY.
:param exception_: python list; list containing the id numbers (id(sound)) of the
pygame.Sound objects that must keep playing.
"""
assert isinstance(exception_, list),\
"\nPositional argument exception_ must be a python list type, got %s " % type(exception_)
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in self.all:
l = c - start
snd_object = snd_obj[l]
if snd_object:
if snd_object.obj_id not in exception_:
channels[l].set_volume(0.0)
channels[l].stop()
self.update()
def stop_all(self):
"""
STOP ALL SOUNDS NO EXCEPTIONS.
:return: None
"""
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in self.all:
l = c - start
snd_object = snd_obj[l]
if snd_object:
channels[l].set_volume(0.0)
channels[l].stop()
self.update()
def stop_name(self, name_: str = ""):
"""
STOP A PYGAME.SOUND OBJECT IF PLAYING ON ANY OF THE CHANNELS.
:param name_: string; Sound name to stop
:return : None
"""
assert isinstance(name_, str),\
"\nPositional argument name_ must be a python string type, got %s " % type(name_)
channels = self.channels
start = self.start
for sound in self.snd_obj:
if sound and sound.name == name_:
try:
channels[sound.active_channel - start].set_volume(0.0)
channels[sound.active_channel - start].stop()
except IndexError:
# IGNORE ERROR
...
self.update()
def stop_object(self, object_id: int):
"""
STOP A GIVEN SOUND USING THE PYGAME.SOUND OBJECT ID NUMBER.
:param object_id: integer; Object unique identifier such as id(sound)
:return : None
"""
assert isinstance(object_id, int), \
"\nPositional argument object_id must be a python string type, got %s " % type(object_id)
channels = self.channels
start = self.start
for sound in self.snd_obj:
if sound and sound.obj_id == object_id:
try:
channels[sound.active_channel - start].set_volume(0.0)
channels[sound.active_channel - start].stop()
except IndexError:
# IGNORE ERROR
...
self.update()
def return_time_left(self, object_id) -> float:
"""
RETURN THE TIME LEFT IN SECONDS (RETURN -1 IF | |
# components/server/tests/data/test_datamodel.py
"""Unit tests for the data model."""
import json
import pathlib
import unittest
def setUpModule(): # pylint: disable=invalid-name
"""Read the data model once for all data model tests."""
with pathlib.Path("src/data/datamodel.json").open() as data_model_json:
DataModelTestCase.data_model = json.load(data_model_json)
class DataModelTestCase(unittest.TestCase): # skipcq: PTC-W0046
"""Base class for data model unit tests."""
data_model = {}
class DataModelTest(DataModelTestCase):
"""Unit tests for the data model."""
def test_top_level_keys(self):
"""Test that the top level keys are correct."""
self.assertEqual({"metrics", "subjects", "sources", "scales"}, set(self.data_model.keys()))
def test_metrics_have_sources(self):
"""Test that each metric has one or more sources."""
for metric in self.data_model["metrics"].values():
self.assertTrue(len(metric["sources"]) >= 1)
def test_source_parameters(self):
"""Test that the sources have at least one parameter for each metric supported by the source."""
for metric_id, metric in self.data_model["metrics"].items():
for source in metric["sources"]:
# pylint: disable=superfluous-parens
if not (parameters := self.data_model["sources"][source]["parameters"]):
continue
parameter_metrics = []
for parameter in parameters.values():
parameter_metrics.extend(parameter["metrics"])
self.assertTrue(
metric_id in parameter_metrics, f"No parameters for metric '{metric_id}' in source '{source}'"
)
def test_addition(self):
"""Test each metric had its addition defined correctly."""
for metric in self.data_model["metrics"].values():
self.assertTrue(metric["addition"] in ("max", "min", "sum"))
def test_default_source(self):
"""Test that each metric has a default source, and that the default source is listed as possible source."""
for metric in self.data_model["metrics"].values():
self.assertTrue(metric["default_source"] in metric["sources"])
def test_metrics_belong_to_at_least_one_subject(self):
"""Test that each metric belongs to at least one subject."""
for metric in self.data_model["metrics"]:
for subject in self.data_model["subjects"].values():
if metric in subject["metrics"]:
break
else: # pragma: no cover
self.fail(f"Metric {metric} not listed in any subject.")
def test_metric_direction(self):
"""Test that all metrics have a valid direction."""
for metric_uuid, metric in self.data_model["metrics"].items():
direction = metric["direction"]
self.assertTrue(direction in ("<", ">"), f"Metric {metric_uuid} has an invalid direction: {direction}")
def test_metrics_have_scales(self):
"""Test that all metrics have one or more allowed scales and a default scale that's in the allowed scales."""
scales = self.data_model["scales"].keys()
for metric_id, metric in self.data_model["metrics"].items():
allowed_scales = metric.get("scales", [])
self.assertTrue(len(allowed_scales) > 0, f"Metric {metric} has no scales")
for scale in allowed_scales:
self.assertTrue(scale in scales, f"Metric scale {scale} not in collection of all scales")
default_scale = metric["default_scale"]
self.assertTrue(
default_scale in allowed_scales,
f"Default scale {default_scale} of metric {metric_id} not in allowed scales: {allowed_scales}",
)
def test_all_scales_are_used(self):
"""Test that all scales are used at least once."""
for scale in self.data_model["scales"].keys():
for metric in self.data_model["metrics"].values():
if scale in metric.get("scales", []):
break
else: # pragma: no cover
self.fail(f"Scale {scale} not used for any metric.")
class DataModelSourcesTest(DataModelTestCase):
"""Unit tests for sources in the data model."""
def test_source_attributes(self):
"""Test that sources have a name and description."""
for source_id, source in self.data_model["sources"].items():
expected_attributes = ["name", "description"]
if source_id not in ["calendar", "manual_number", "random_number"]:
expected_attributes.append("url")
for attribute in expected_attributes:
self.assertTrue(attribute in source, f"Source '{source_id}' has no '{attribute}' attribute.")
self.assertTrue(
source["description"].endswith("."), f"The description of source '{source_id}' doesn't end with a dot."
)
def test_sources_with_landing_url(self):
"""Test that the the sources with landing url also have url."""
for source in self.data_model["sources"]:
if "landing_url" in self.data_model["sources"][source]["parameters"]:
self.assertTrue(
"url" in self.data_model["sources"][source]["parameters"],
f"Source '{source}' has the 'landing_url' parameter, but not the 'url' parameter.",
)
def test_source_parameter_metrics(self):
"""Test that the metrics listed for source parameters are metrics supported by the source."""
for source_id, source in self.data_model["sources"].items():
for parameter_key, parameter_value in source["parameters"].items():
for metric in parameter_value["metrics"]:
self.assertTrue(
source_id in self.data_model["metrics"][metric]["sources"],
f"Parameter '{parameter_key}' of source '{source_id}' lists metric '{metric}' as metric "
f"needing this parameter, but that metric doesn't list '{source_id}' as allowed source",
)
def test_source_parameter_names(self):
"""Test that each source parameter has a name and short name."""
for source_id, source in self.data_model["sources"].items():
for parameter_key, parameter_value in source["parameters"].items():
for field in ["name", "short_name"]:
error_message = f"Parameter '{parameter_key}' of source '{source_id}' has no {field}"
self.assertTrue(field in parameter_value, error_message)
def test_parameter_api_values(self):
"""Test the api values.
Check that the api values are only used for single or multiple choice parameters and that the keys match with
the possible regular values. The api values are the values used in the source.
"""
for source_id, source in self.data_model["sources"].items():
for parameter_key, parameter in source["parameters"].items():
if "api_values" not in parameter:
continue
self.assertTrue(
"values" in parameter,
f"Parameter {parameter_key} of source {source_id} has api values, but no values.",
)
self.assertTrue(
set(parameter["api_values"].keys()).issubset(set(parameter["values"])),
f"The api values of parameter {parameter_key} are not a subset of the values.",
)
def test_multiple_choice_parameters(self):
"""Test that multiple choice parameters have both a default value and a list of options."""
for source_id, source in self.data_model["sources"].items():
for parameter_id, parameter in source["parameters"].items():
if parameter["type"].startswith("multiple_choice"):
self.assertTrue("default_value" in parameter)
self.assertTrue(
"placeholder" in parameter, f"Parameter {parameter_id} of source {source_id} has no placeholder"
)
self.assertEqual(list, type(parameter["default_value"]))
if parameter["type"] == "multiple_choice":
self.assertTrue("values" in parameter)
if parameter["type"] == "multiple_choice_with_addition":
self.assertFalse("values" in parameter)
self.assertEqual([], parameter["default_value"])
def test_mandatory_parameters(self):
"""Test that each metric has a mandatory field with true or false value."""
for source_id, source in self.data_model["sources"].items():
for parameter_id, parameter_values in source["parameters"].items():
self.assertTrue(
"mandatory" in parameter_values,
f"The parameter '{parameter_id}' of source '{source_id}' has no 'mandatory' field",
)
self.assertTrue(
parameter_values["mandatory"] in (True, False),
f"The 'mandatory' field of parameter '{parameter_id}' of source '{source_id}' is neither "
"true nor false",
)
def test_integer_parameter_unit(self):
"""Test that integer type parameters have a unit."""
for source_id, source in self.data_model["sources"].items():
for parameter_id, parameter_values in source["parameters"].items():
if parameter_values["type"] == "integer":
self.assertTrue(
"unit" in parameter_values,
f"Parameter '{parameter_id}' of source '{source_id}' has integer type but no unit parameter",
)
def test_invalid_characters_in_names(self):
"""Test that we don't use dots in metric or source names since we want to be able to use the names as keys."""
for source in self.data_model["sources"].values():
self.assertFalse("." in source["name"])
for metric in self.data_model["metrics"].values():
self.assertFalse("." in metric["name"])
def test_validate_on(self):
"""Test that the list of parameters to validate on are in fact parameters of the source."""
for source in self.data_model["sources"].values():
parameter_keys = source["parameters"].keys()
for parameter_value in source["parameters"].values():
for parameter_key in parameter_value.get("validate_on", []):
self.assertTrue(parameter_key in parameter_keys)
def test_source_parameter_help(self):
"""Test that source parameters have a help url or text, but not both, and that they are formatted correctly."""
for source in self.data_model["sources"].values():
for parameter_key, parameter in source["parameters"].items():
parameter_description = f"The parameter '{parameter_key}' of the source '{source['name']}'"
self.assertFalse(
"help" in parameter and "help_url" in parameter,
f"{parameter_description} has both a help and a help_url",
)
if "help" in parameter:
self.assertTrue(parameter["help"].endswith("."), f"{parameter_description} does not end with a dot")
def test_entity_attributes(self):
"""Test that entities have the required attributes."""
for source_id, source in self.data_model["sources"].items():
for entity_key, entity_value in source["entities"].items():
self.assertTrue("name" in entity_value.keys())
self.assertTrue("name_plural" in entity_value.keys(), f"No 'name_plural' in {source_id}.{entity_key}")
def test_colors(self):
"""Test that the color values are correct."""
for source_id, source in self.data_model["sources"].items():
for entity_key, entity_value in source["entities"].items():
for attribute in entity_value["attributes"]:
for color_value in attribute.get("color", {}).values():
self.assertTrue(
color_value in ("active", "error", "negative", "positive", "warning"),
f"Color {color_value} of {source_id}.{entity_key} is not correct",
)
def test_entity_attribute_type(self):
"""Test that each entity attribute has a correct type."""
allowed_types = ("date", "datetime", "string", "float", "integer", "minutes", "status")
for source_id, source in self.data_model["sources"].items():
for entity_key, entity_value in source["entities"].items():
for attribute in entity_value["attributes"]:
if "type" in attribute:
self.assertIn(
attribute["type"],
allowed_types,
f"Attribute {attribute['key']} of {source_id}.{entity_key} has an invalid type "
f"({attribute['type']}); should be one of {allowed_types}",
)
def test_measured_attribute(self):
"""Test that the measured attribute is actually a key of an entity attribute and has a computable type."""
for source in self.data_model["sources"].values():
for entities in source["entities"].values():
if measured_attribute := entities.get("measured_attribute"):
self.assertIn(measured_attribute, [attribute["key"] for attribute in entities["attributes"]])
for attribute in entities["attributes"]:
if measured_attribute == attribute["key"]:
self.assertIn(attribute["type"], ["integer", "float", "minutes"])
def test_configuration(self):
"""Test that sources with a configuration have a correct configuration."""
for source_id, source in self.data_model["sources"].items():
if "configuration" in source:
for configuration in source["configuration"].values():
self.assertIn("name", configuration)
self.assertIn("value", configuration)
self.assertTrue(len(configuration["metrics"]) > 0)
for metric in configuration["metrics"]:
self.assertIn(source_id, self.data_model["metrics"][metric]["sources"])
def test_logos(self):
"""Test that a logo exists for each source type and vice versa."""
sources = self.data_model["sources"]
logos_path = pathlib.Path("src/routes/logos")
for source_type in sources:
logo_path = logos_path / f"{source_type}.png"
self.assertTrue(logo_path.exists(), f"No logo exists for {source_type}")
for logo_path in logos_path.glob("*.png"):
self.assertTrue(logo_path.stem in sources, f"No source exists in the data model for {logo_path}")
class DataModelSpecificSourcesTest(DataModelTestCase):
"""Unit tests for specific sources in the data model."""
def test_quality_time_source_type_parameter(self):
"""Test that the source type parameter of the Quality-time source lists all source types."""
all_source_names = {source["name"] for source in self.data_model["sources"].values()}
quality_time_source_names = set(
self.data_model["sources"]["quality_time"]["parameters"]["source_type"]["values"]
)
self.assertEqual(all_source_names, quality_time_source_names)
all_source_api_values = {
(source["name"], source_id) for source_id, source | |
skip_0:
if varying_in:
shader_source += "varying " + input + "_vs_output;\n"
else:
shader_source += insert_layout_location(index_counter)
shader_source += "in " + input + "_vs_output;\n"
index_counter += 1
for instance_input in instance_inputs:
shader_source += insert_layout_location(index_counter)
shader_source += "layout(location = " + str(index_counter) + ") in " + instance_input + "_instance_input;\n"
index_counter += 1
# outputs structs
index_counter = 0
if _shader.shader_type == "vs":
for output in outputs:
if output.split()[1] != "position":
if varying_in:
shader_source += "varying " + output + "_" + _shader.shader_type + "_output;\n"
else:
shader_source += insert_layout_location(index_counter)
shader_source += "out " + output + "_" + _shader.shader_type + "_output;\n"
index_counter += 1
elif _shader.shader_type == "ps":
for p in range(0, len(outputs)):
if "SV_Depth" in output_semantics[p]:
continue
else:
if not gl_frag_color:
output_index = output_semantics[p].replace("SV_Target", "")
if output_index != "":
shader_source += "layout(location = " + output_index + ") "
else:
shader_source += insert_layout_location(0)
shader_source += "out " + outputs[p] + "_ps_output;\n"
if _info.v_flip:
shader_source += "uniform float v_flip;\n"
# global structs for access to inputs or outputs from any function in vs or ps
if _shader.shader_type != "cs":
shader_source += generate_global_io_struct(inputs, "struct " + _shader.input_struct_name)
if _shader.instance_input_struct_name:
if len(instance_inputs) > 0:
shader_source += generate_global_io_struct(instance_inputs, "struct " + _shader.instance_input_struct_name)
if len(outputs) > 0:
shader_source += generate_global_io_struct(outputs, "struct " + _shader.output_struct_name)
# convert sample_texture to sample_texture_2d etc
if explicit_texture_sampling:
texture_types = texture_types_from_resource_decl(_shader.resource_decl)
_shader.functions_source = replace_texture_samples(_shader.functions_source, texture_types)
_shader.main_func_source = replace_texture_samples(_shader.main_func_source, texture_types)
if uniform_pack:
_shader.functions_source = insert_uniform_unpack_assignment(_shader.functions_source, uniform_pack)
_shader.main_func_source = insert_uniform_unpack_assignment(_shader.main_func_source, uniform_pack)
resource_decl = _shader.resource_decl
if _info.shader_sub_platform == "gles":
resource_decl = replace_token_pasting(resource_decl)
shader_source += _tp.struct_decls
shader_source += uniform_buffers
shader_source += resource_decl
shader_source += _shader.functions_source
glsl_main = _shader.main_func_source
skip_function_start = glsl_main.find("{") + 1
skip_function_end = glsl_main.rfind("return")
glsl_main = glsl_main[skip_function_start:skip_function_end].strip()
input_name = {
"vs": "_vs_input",
"ps": "_vs_output",
"cs": "_cs_input"
}
output_name = {
"vs": "_vs_output",
"ps": "_ps_output",
"cs": "_cs_output"
}
if _shader.shader_type == "cs":
shader_source += "layout("
shader_source += "local_size_x = " + str(_tp.threads[0]) + ", "
shader_source += "local_size_y = " + str(_tp.threads[1]) + ", "
shader_source += "local_size_z = " + str(_tp.threads[2])
shader_source += ") in;\n"
shader_source += "void main()\n{\n"
shader_source += "ivec3 gid = ivec3(gl_GlobalInvocationID);\n"
shader_source += glsl_main
shader_source += "\n}\n"
else:
# vs and ps need to assign in / out attributes to structs
pre_assign = generate_input_assignment(inputs, _shader.input_struct_name, "_input", input_name[_shader.shader_type])
if _shader.instance_input_struct_name:
if len(instance_inputs) > 0:
pre_assign += generate_input_assignment(instance_inputs,
_shader.instance_input_struct_name, "instance_input", "_instance_input")
post_assign = generate_output_assignment(_info, outputs, "_output", output_name[_shader.shader_type], gl_frag_color)
shader_source += "void main()\n{\n"
shader_source += "\n" + pre_assign + "\n"
shader_source += glsl_main
shader_source += "\n" + post_assign + "\n"
shader_source += "}\n"
# condition source
shader_source = replace_io_tokens(shader_source)
shader_source = format_source(shader_source, 4)
# replace sv_semantic tokens
for sv in _shader.sv_semantics:
if sv[0] == "SV_InstanceID":
shader_source = replace_token(sv[2], "gl_InstanceID", shader_source)
elif sv[0] == "SV_VertexID":
shader_source = replace_token(sv[2], "gl_VertexID", shader_source)
extension = {
"vs": ".vsc",
"ps": ".psc",
"cs": ".csc"
}
temp_extension = {
"vs": ".vert",
"ps": ".frag",
"cs": ".comp"
}
temp_path = os.path.join(_info.temp_dir, pmfx_name)
output_path = os.path.join(_info.output_dir, pmfx_name)
os.makedirs(temp_path, exist_ok=True)
os.makedirs(output_path, exist_ok=True)
temp_file_and_path = os.path.join(temp_path, _tp.name + temp_extension[_shader.shader_type])
temp_shader_source = open(temp_file_and_path, "w")
temp_shader_source.write(shader_source)
temp_shader_source.close()
output_file_and_path = os.path.join(output_path, _tp.name + extension[_shader.shader_type])
if _info.shader_sub_platform == "nvn":
nvn_sdk = os.getenv("NINTENDO_SDK_ROOT")
if not nvn_sdk:
print("error: you must have nintendo switch sdk installed, "
"'NINTENDO_SDK_ROOT' environment variable is set and is added to your PATH.", flush=True)
sys.exit(1)
exe = os.path.normpath(_info.nvn_exe)
nvn_type = {
"vs": "-stage vertex",
"ps": "-stage fragment",
"cs": "-stage compute"
}
cmd = "-input " + sanitize_file_path(temp_file_and_path) + " "
cmd += nvn_type[_shader.shader_type] + " " + sanitize_file_path(temp_file_and_path) + " "
cmd += "-output " + sanitize_file_path(output_file_and_path) + " "
error_code, error_list, output_list = call_wait_subprocess(exe + " " + cmd)
_tp.error_code = error_code
_tp.error_list = error_list
_tp.output_list = output_list
else:
exe = os.path.join(_info.tools_dir, "bin", "glsl", get_platform_name(), "validator" + get_platform_exe())
if _info.shader_sub_platform == "spirv":
exe += " -V "
exe += " -o " + output_file_and_path
error_code, error_list, output_list = call_wait_subprocess(exe + " " + temp_file_and_path)
_tp.error_code = error_code
_tp.error_list = error_list
_tp.output_list = output_list
if _info.shader_sub_platform != "spirv":
# copy glsl shader to data
shader_file = open(output_file_and_path, "w")
shader_file.write(shader_source)
shader_file.close()
return error_code
# we need to convert ubytes 255 to float 1.0, which applies to COLOR semantics
def convert_ubyte_to_float(semantic):
if semantic.find("COLOR") != -1:
return True
return False
# gets metal packed types from hlsl semantic, all types are float except COLOR: uchar, BLENDINDICES uchar
def get_metal_packed_decl(stage_in, input, semantic):
vector_sizes = ["2", "3", "4"]
packed_decl = ""
if not stage_in:
packed_decl = "packed_"
split = input.split(" ")
type = split[0]
if semantic.find("COLOR") != -1 or semantic.find("BLENDINDICES") != -1:
packed_decl += "uchar"
count = type[len(type)-1]
if count in vector_sizes:
packed_decl += count
else:
packed_decl += type
for i in range(1, len(split)):
packed_decl += " " + split[i]
return packed_decl
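# Illustrative examples derived from the rules above (hypothetical inputs/semantics):
#   get_metal_packed_decl(False, "float4 colour", "COLOR0")    -> "packed_uchar4 colour"
#   get_metal_packed_decl(False, "float3 normal", "NORMAL")    -> "packed_float3 normal"
#   get_metal_packed_decl(True,  "float2 uv",     "TEXCOORD0") -> "float2 uv"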
# finds token in source code
def find_token(token, string):
delimiters = [
"(", ")", "{", "}", ".", ",", "+", "-", "=", "*", "/",
"&", "|", "~", "\n", "\t", "<", ">", "[", "]", ";", " "
]
fp = string.find(token)
if fp != -1:
left = False
right = False
# check left
if fp > 0:
for d in delimiters:
if string[fp-1] == d:
left = True
break
else:
left = True
# check right (treat end-of-string as a valid right boundary)
ep = fp + len(token)
if ep < len(string):
for d in delimiters:
if string[ep] == d:
right = True
break
else:
right = True
if left and right:
return fp
# try again
tt = find_token(token, string[fp+len(token):])
if tt == -1:
return -1
return fp+len(token) + tt
return -1
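# Illustrative example: unlike str.find, find_token only matches whole tokens bounded by
# the delimiters above, so the "tex" inside "texture" is skipped:
#   find_token("tex", "texture(tex, uv)") -> 8   (index of the stand-alone "tex")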
# replace all occurrences of token in source code
def replace_token(token, replace, string):
while True:
pos = find_token(token, string)
if pos == -1:
break
string = string[:pos] + replace + string[pos+len(token):]
return string
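# Illustrative example: "_" is not a delimiter, so identifiers that merely contain the
# token are left untouched:
#   replace_token("colour", "color", "float4 colour = colour_vs_output;")
#   -> "float4 color = colour_vs_output;"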
# metal main functions require textures and buffers to be passed in as args, and do not support global decls
def metal_functions(functions, cbuffers, textures):
cbuf_members_list = []
for c in cbuffers:
cbuf_members = parse_and_split_block(c)
cbuf_members_list.append(cbuf_members)
texture_list = textures.split(";")
texture_args = []
for t in texture_list:
cpos = t.find(",")
if cpos == -1:
continue
spos = t.find("(")
macro_args = t[spos + 1:].split(",")
tex_type = t[:spos] + "_arg"
name_pos = 0
if t.find("texture_2dms") != -1:
name_pos = 2
name = macro_args[name_pos].strip()
texture_args.append((name, tex_type + "(" + name + ")"))
fl = find_functions(functions)
final_funcs = ""
func_sig_additions = dict()
for f in fl:
bp = f.find("(")
ep = f.find(")")
fb = f[ep:]
fn = f.find(" ")
fn = f[fn+1:bp]
sig = f[:bp+1]
count = 0
# insert cbuf members
for c in cbuf_members_list:
for i in range(0, len(c), 2):
ap = c[i+1].find("[")
member = c[i+1]
if ap != -1:
member = member[:ap]
if find_token(member, fb) != -1:
if count > 0:
sig += ",\n"
if fn in func_sig_additions.keys():
func_sig_additions[fn].append(member)
else:
func_sig_additions[fn] = [member]
ref_type = "& "
if ap != -1:
ref_type = "* "
sig += "constant " + c[i] + ref_type + member
count += 1
# insert texture members
for t in texture_args:
if find_token(t[0], fb) != -1:
if count > 0:
sig += ",\n"
sig += t[1]
count += 1
if fn in func_sig_additions.keys():
func_sig_additions[fn].append(t[0])
func_sig_additions[fn].append("sampler_" + t[0])
else:
func_sig_additions[fn] = [t[0]]
func_sig_additions[fn].append("sampler_" + t[0])
if bp != -1 and ep != -1:
args = f[bp+1:ep]
arg_list = args.split(",")
for arg in arg_list:
if count > 0:
sig += ",\n"
count += 1
address_space = "thread"
toks = arg.split(" ")
if '' in toks:
toks.remove('')
if '\n' in toks:
toks.remove('\n')
ref = False
for t in toks:
if t == "out" or t == "inout":
ref = True
if t == "in":
address_space = "constant"
ref = True
if not ref:
sig += arg
else:
array = toks[2].find("[")
if array == -1:
sig += address_space + " " + toks[1] + "& " + toks[2]
else:
sig += address_space + " " + toks[1] + "* " + toks[2][:array]
# find used cbuffer members
func = sig + fb
final_funcs += func
return final_funcs, func_sig_additions
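# Illustrative sketch of the signature rewriting above (function, member and texture names
# are hypothetical): a function whose body reads cbuffer member "float m_time" and samples
# "tex0" would have its signature extended roughly as
#   float4 my_func(float2 uv)
#   -> float4 my_func(constant float& m_time, texture_2d_arg(tex0), float2 uv)
# and func_sig_additions["my_func"] would hold ["m_time", "tex0", "sampler_tex0"].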
# | |
"""
This module defines a class called "balto_gui" that can be used to
create a graphical user interface (GUI) for downloading data from
OpenDAP servers into a Jupyter notebook. If used with Binder,
this GUI runs in a browser window and does not require the user to
install anything on their computer. However, this module should be
included in the same directory as the Jupyter notebook.
"""
#------------------------------------------------------------------------
#
# Copyright (C) 2020. <NAME>
#
#------------------------------------------------------------------------
from ipyleaflet import Map, basemaps, FullScreenControl
from ipyleaflet import MeasureControl, Rectangle
## from ipyleaflet import ScaleControl # (doesn't work)
from traitlets import Tuple
## import ipyleaflet as ipyl
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, HTML
## from IPython.core.display import display
## from IPython.lib.display import display
import pydap.client # (for open_url, etc.)
import requests # (used by get_filenames() )
import json
import datetime # (used by get_duration() )
import copy
import numpy as np
import balto_plot as bp
#------------------------------------------------------------------------
#
# class balto_gui
# __init__()
# pix_str()
# show_gui()
# make_acc_gui()
# make_tab_gui()
# make_data_panel()
# reset_data_panel()
# make_map_panel()
# make_dates_panel()
# make_download_panel()
# make_prefs_panel()
# #--------------------------
# get_map_bounds()
# replace_map_bounds()
# replace_map_bounds2()
# update_map_bounds()
# zoom_out_to_new_bounds()
# --------------------------
# get_url_dir_filenames()
# update_filename_list()
# get_opendap_file_url()
# open_dataset()
# update_data_panel()
# --------------------------
# update_var_info()
# get_all_var_shortnames()
# get_all_var_longnames()
# get_all_var_units()
# --------------------------
# get_var_shortname()
# get_var_longname()
# get_var_units()
# get_var_shape()
# get_var_dimensions()
# get_var_dtype()
# get_var_attributes()
# get_var_time_attributes()
# -------------------------------
# update_datetime_panel()
# get_years_from_time_since()
# clear_datetime_notes()
# append_datetime_notes()
# list_to_string()
# -------------------------------
# pad_with_zeros()
# get_actual_time_units()
# get_time_delta_str()
# get_datetime_obj_from_str()
# get_datetime_obj_from_one_str()
# get_start_datetime_obj()
# get_end_datetime_obj()
# get_dt_from_datetime_str()
# split_datetime_str()
# split_date_str()
# split_time_str()
# get_datetime_from_time_since()
# get_time_since_from_datetime()
# get_month_difference()
# -------------------------------
# get_new_time_index_range()
# get_new_lat_index_range()
# get_new_lon_index_range()
# -------------------------------
# get_duration() ## not used yet
# ----------------------------
# get_download_format()
# clear_download_log()
# append_download_log()
# print_user_choices()
# download_data()
# show_grid()
# -------------------------------
# get_opendap_package() # (in prefs panel)
# ----------------------------
# get_abbreviated_var_name()
# get_possible_svo_names()
#
#------------------------------
# Example GES DISC opendap URL
#------------------------------
# https://gpm1.gesdisc.eosdis.nasa.gov/opendap/GPM_L3/GPM_3IMERGHHE.05/2014/091/
# 3B-HHR-E.MS.MRG.3IMERG.20140401-S000000-E002959.0000.V05B.HDF5.nc
# ?HQprecipitation[1999:2200][919:1049],lon[1999:2200],lat[919:1049]
#------------------------------------------------------------------------
class balto_gui:
#--------------------------------------------------------------------
def __init__(self):
self.version = '0.5'
self.user_var = None
self.default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.timeout_secs = 60 # (seconds)
#----------------------------------------------------------
# "full_box_width" = (label_width + widget_width)
# gui_width = left_label_width + mid_width + button_width
# The 2nd, label + widget box, is referred to as "next".
# (2 * half_widget_width) + left_label + next_label = 540
#----------------------------------------------------------
self.gui_width = 680
self.left_label_width = 120
self.next_label_width = 50
self.all_label_width = 170
self.full_box_width = 540
self.widget_width = (self.full_box_width - self.left_label_width)
# self.half_widget_width = (self.full_box_width - self.all_label_width)/2
# self.half_widget_width = 183
self.left_widget_width = 230
self.next_widget_width = 136
self.left_box_width = (self.left_label_width + self.left_widget_width)
self.next_box_width = (self.next_label_width + self.next_widget_width)
self.button_width = 70 # big enough for "Reset"
#-----------------------------------------------------
self.map_width = (self.gui_width - 40)
self.map_height = 230 # was 250
self.map_center_init = (20.0, 0)
self.add_fullscreen_control = True
self.add_scale_control = False # (doesn't work)
self.add_measure_control = True
#-----------------------------------------------------
self.gui_width_px = self.pix_str( self.gui_width )
self.map_width_px = self.pix_str( self.map_width )
self.map_height_px = self.pix_str( self.map_height )
#-----------------------------------------------------
self.date_width_px = '240px'
self.time_width_px = '180px'
self.hint_width_px = '120px'
#---------------------------------------------------
self.log_box_width_px = self.pix_str( self.full_box_width )
self.log_box_height_px = '200px'
#---------------------------------------------------
# These styles are used to control width of labels
# self.init_label_style is the initial default.
#---------------------------------------------------
llw_px = self.pix_str( self.left_label_width )
nlw_px = self.pix_str( self.next_label_width )
self.init_label_style = {'description_width': 'initial'}
self.left_label_style = {'description_width': llw_px}
self.next_label_style = {'description_width': nlw_px}
self.date_style = {'description_width': '70px'}
self.time_style = {'description_width': '70px'}
# __init__()
#--------------------------------------------------------------------
def pix_str(self, num):
return str(num) + 'px'
#--------------------------------------------------------------------
def show_gui(self, ACC_STYLE=False, SHOW_MAP=True):
#------------------------------------------------------
# Encountered a problem where ipyleaflet (used for the
# map panel) prevented any part of the GUI from being
# displayed. The SHOW_MAP flag helps to test for this
# problem.
#------------------------------------------------------
#------------------------------------
# Create & display the complete GUI
#-----------------------------------
if (ACC_STYLE):
self.make_acc_gui()
else:
# Use the TAB style
self.make_tab_gui( SHOW_MAP=SHOW_MAP)
gui_output = widgets.Output()
display(self.gui, gui_output)
# show_gui()
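# Typical notebook usage (illustrative; assumes this module is saved as balto_gui.py
# next to the notebook):
#   from balto_gui import balto_gui
#   gui = balto_gui()
#   gui.show_gui()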
#--------------------------------------------------------------------
def make_acc_gui(self):
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel()
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=None causes all cells to be collapsed
#-------------------------------------------------------
acc = widgets.Accordion( children=[p0, p1, p2, p3, p4],
selected_index=None,
layout=Layout(width=gui_width_px) )
acc.set_title(0, p0_title)
acc.set_title(1, p1_title)
acc.set_title(2, p2_title)
acc.set_title(3, p3_title)
acc.set_title(4, p4_title)
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 line
head = widgets.HTML(value=f"<b><font size=4>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
# self.gui = widgets.VBox([pad, head, acc]) # (top padding
self.gui = widgets.VBox([head, acc]) # (no top padding)
# make_acc_gui()
#--------------------------------------------------------------------
def make_tab_gui(self, SHOW_MAP=True):
#---------------------------------------------------------
# If there is a problem with ipyleaflet, it can prevent
# any part of the GUI from being displayed. You can
# set SHOW_MAP=False to remove the map to test for this.
#---------------------------------------------------------
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel( SHOW_MAP=SHOW_MAP )
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=0 shows Browse Data panel
#-------------------------------------------------------
tab = widgets.Tab( children=[p0, p1, p2, p3, p4],
selected_index=0,
layout=Layout(width=gui_width_px) )
tab.set_title(0, p0_title)
tab.set_title(1, p1_title)
tab.set_title(2, p2_title)
tab.set_title(3, p3_title)
tab.set_title(4, p4_title)
#### tab.titles = [str(i) for i in range(len(children))]
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
pad = self.get_padding(1, HORIZONTAL=False) # 1 line
head = widgets.HTML(value=f"<b><font size=5>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
## self.gui = widgets.VBox([pad, head, acc])
self.gui = widgets.VBox([head, tab]) # (no padding above)
# make_tab_gui()
#--------------------------------------------------------------------
def get_padding(self, n, HORIZONTAL=True):
#-------------------------------
# Get some white space padding
#-------------------------------
if (HORIZONTAL):
#--------------------------------
# Use overloaded multiplication
#--------------------------------
## s = (' ' * n) # overloaded multiplication
s = "<p>" + (' ' * n) + "</p>"
pad = widgets.HTML( value=s )
else:
s = ("<br>" * n)
pad = widgets.HTML( value=s )
return pad
# get_padding()
#--------------------------------------------------------------------
def make_data_panel(self):
#-----------------------------------
# Browse data on an OpenDAP server
#-----------------------------------
left_style = self.left_label_style
next_style = self.next_label_style
full_width_px = self.pix_str( self.full_box_width )
left_width_px = self.pix_str( self.left_box_width )
next_width_px = self.pix_str( self.next_box_width )
btn_width_px = self.pix_str( self.button_width )
#---------------------------------------------------------
o1 = widgets.Text(description='OpenDAP URL Dir:',
value=self.default_url_dir,
disabled=False, style=left_style,
layout=Layout(width=full_width_px))
b1 = widgets.Button(description="Go", layout=Layout(width=btn_width_px))
o2 = widgets.Dropdown( description='Filename:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
#------------------------------------------------------------------
oL = widgets.Text(description='Long name:', style=left_style,
value='', layout=Layout(width=full_width_px) )
## o3 = widgets.Select( description='Variable:',
o3 = widgets.Dropdown( description='Variable:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=left_width_px) )
o4 = widgets.Text(description='Units:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o5 = widgets.Text(description='Dimensions:', style=left_style,
value='', layout=Layout(width=left_width_px) )
o6 = widgets.Text(description='Shape:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o7 = widgets.Text(description='Data type:', style=left_style,
value='', layout=Layout(width=full_width_px) )
o8 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
o9 = widgets.Text(description='Status:', style=left_style,
value='Ready.', layout=Layout(width=full_width_px) )
b2 = widgets.Button(description="Reset", layout=Layout(width=btn_width_px))
## pd = widgets.HTML((' ' * 1)) # for padding
#-------------------------------
# Arrange widgets in the panel
#-------------------------------
url_box = widgets.HBox([o1, b1]) # directory + Go button
stat_box = widgets.HBox([o9, b2]) # status + Reset button
name_box = widgets.VBox([o3, o5])
## pad_box = widgets.VBox([pd, pd])
unit_box = widgets.VBox([o4, o6])
mid_box = widgets.HBox([name_box, unit_box])
## mid_box = widgets.HBox([name_box, pad_box, unit_box])
panel = widgets.VBox([url_box, o2, oL, mid_box, o7, o8, stat_box])
self.data_url_dir = o1 # on an OpenDAP server
self.data_filename = o2
self.data_var_long_name = oL
self.data_var_name = o3 # short_name
self.data_var_units = o4
self.data_var_dims = o5
self.data_var_shape = o6
self.data_var_type = o7
self.data_var_atts = o8
self.data_status = o9
self.data_panel = panel
#-----------------
# Event handlers
#-----------------------------------------------------
# Note: NEED to set names='value' here. If names
# keyword is omitted, only works intermittently.
#------------------------------------------------------------
# "on_click" handler function is passed b1 as argument.
# "observe" handler function is passed "change", which
# is a dictionary, as argument. See Traitlet events.
#------------------------------------------------------------
b1.on_click( self.update_filename_list )
b2.on_click( self.reset_data_panel )
o2.observe( self.update_data_panel, names=['options','value'] )
o3.observe( self.update_var_info, names=['options', 'value'] )
## o3.observe( self.update_var_info, names='value' )
## o2.observe( self.update_data_panel, names='All' )
## o3.observe( self.update_var_info, names='All' )
#-------------------------------------------------------
# It turned out this wasn't an issue, but interesting.
#-------------------------------------------------------
# Note: Method functions have type "method" instead
# of "function" and | |
# -*- coding: utf-8 -*-
import logging
import itertools
import typing
import semver
import copy as py_copy
from typing import List, Set, Dict, Tuple, Optional
from enzi import config
from enzi.config import DependencyRef
from enzi.config import Config as EnziConfig
from enzi.config import DependencyVersion
from enzi.config import Locked
from enzi.frontend import Enzi
from enzi.git import GitVersions
from enzi.io import EnziIO
from enzi.utils import flat_map, unique
from enzi.ver import VersionReq
from semver import VersionInfo as Version
logger = logging.getLogger(__name__)
class Dependency(object):
def __init__(self, name: str):
self.name = name
# the sources for this dependency <K=DependencyRef, V=DependencySource>
self.sources: typing.MutableMapping[DependencyRef, DependencySource] = {
}
# the enzi config we chose
self.config: typing.Optional[EnziConfig] = None
def source(self):
# TODO: inspect this code
min_source = min(self.sources.keys())
return self.sources[min_source]
class DependencyConstraint(object):
# TODO: rewrite in more python way
__allow_cons__ = ("Version", "Revision")
def __init__(self, cons, val=None):
if cons not in self.__allow_cons__:
raise ValueError('dep cons must be in {}'.format(self.__allow_cons__))
self.cons: str = cons
if val is None or isinstance(val, (VersionReq, Version)) or type(val) == str:
self.value: typing.Union[str, Version, None] = val
else:
raise ValueError(
'dep cons\'s value must be a VersionReq, semver.VersionInfo or str')
def __str__(self):
return str(self.value)
@staticmethod
def Version(version: Version):
return DependencyConstraint('Version', version)
@staticmethod
def Revision(revision: str):
return DependencyConstraint('Revision', revision)
@staticmethod
def From(dep: config.Dependency):
if dep.use_version:
return DependencyConstraint.Version(dep.rev_ver)
else:
return DependencyConstraint.Revision(dep.rev_ver)
def is_version(self):
return self.cons == 'Version'
def is_revision(self):
return self.cons == 'Revision'
class State(object):
# TODO: rewrite in more python way
__allow_states__ = ('Open', 'Locked', 'Constrained', 'Pick')
def __init__(self, state, val=None):
if state not in self.__allow_states__:
raise ValueError('state must be in {}'.format(self.__allow_states__))
self.state: str = state
self.value: typing.Union[int, set, typing.Tuple[int, set], None] = val
@staticmethod
def Open():
return State('Open')
@staticmethod
def Locked(lock_id: int):
return State('Locked', lock_id)
@staticmethod
def Constrained(versions: set):
return State('Constrained', versions)
@staticmethod
def Pick(pick_id, versions: set):
return State('Pick', (pick_id, versions))
def is_open(self):
return self.state == 'Open'
def is_locked(self):
return self.state == 'Locked'
def is_constrained(self):
return self.state == 'Constrained'
def is_pick(self):
return self.state == 'Pick'
def pick(self) -> int:
if self.is_pick():
return self.value[0]
elif self.is_locked():
return self.value
@property
def lock_id(self):
if self.is_locked():
return self.value
else:
raise RuntimeError(
'INTERNAL ERROR: try to get lock id of a non-locked State')
@lock_id.setter
def lock_id(self, value):
if not type(value) == int:
raise ValueError('ids must be a int')
if self.is_locked():
self.value = value
else:
raise RuntimeError(
'INTERNAL ERROR: try to set lock_id for a State::{}'.format(self.state))
@property
def ids(self) -> set:
if self.is_constrained():
return self.value
elif self.is_pick():
return self.value[1]
else:
raise RuntimeError(
'INTERNAL ERROR: try to get ids of State::{}'.format(self.state))
@ids.setter
def ids(self, ids):
if not type(ids) == set:
raise ValueError('ids must be a set')
if self.is_constrained():
self.value = ids
elif self.is_pick():
# self.value[1] = ids
self.value = (self.value[0], ids)
else:
raise RuntimeError(
'INTERNAL ERROR: try to set ids for a State::{}'.format(self.state))
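# Illustrative use of State, mirroring how the resolver below drives it (indices are hypothetical):
#   s = State.Constrained({0, 1, 2})   # three candidate revision indices remain
#   s = State.Pick(0, s.ids)           # pick index 0 out of those candidates
#   s.pick()  -> 0
#   s.ids     -> {0, 1, 2}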
def dump_cons_map(cons_map: dict):
str_buf = []
names = cons_map.keys()
names = sorted(names)
str_buf.append('{')
for name in names:
cons = cons_map[name]
str_buf.append('\n\t\"{}\" :'.format(name))
for pkg_name, con in cons:
str_buf.append(' {}({});'.format(con, pkg_name))
str_buf.append('\n}')
return ''.join(str_buf)
class DependencySource(object):
def __init__(self, dep_id: DependencyRef, versions: GitVersions, pick=None, options=None, state=State.Open()):
self.id: DependencyRef = dep_id
self.versions: GitVersions = versions
self.pick: typing.Optional[int] = pick
self.options: typing.Optional[typing.MutableSet[int]] = options
self.state: State = state
def current_pick(self) -> Optional[DependencyVersion]:
if self.state.is_open() or self.state.is_constrained():
return None
else:
pick_id = self.state.pick()
return DependencyVersion.Git(self.versions.revisions[pick_id])
class DepTableDumper(object):
"""
dumper for DependencyResolver.table
"""
def __init__(self, table: typing.MutableMapping[str, Dependency]):
self.table = table
def __str__(self):
str_buf = ['{']
names = list(self.table.keys())
names.sort()
for name in names:
dep = self.table[name]
str_buf.append('\n\t{} :'.format(name))
for dep_id, src in dep.sources.items():
str_buf.append('\n\t\t[{}] :'.format(dep_id))
state: State = src.state
if state.is_open():
str_buf.append(' open')
elif state.is_locked():
str_buf.append(' locked {}'.format(state.lock_id))
elif state.is_constrained():
ids = state.ids
str_buf.append(' {} possible'.format(ids))
else:
ids = state.ids
pick_id = state.pick()
str_buf.append(
' picked #{} out of {} possible'.format(pick_id, ids))
str_buf.append('\n}')
return ''.join(str_buf)
# TODO: use a more elegant way
__repr__ = __str__
def find_version(versions: typing.List[typing.Tuple[Version, str]], rev: str):
rev_filter = filter(lambda x: x[1] == rev, versions)
rev_map = map(lambda x: x[0], rev_filter)
try:
return max(rev_map)
except ValueError:
return None
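# Illustrative example (hypothetical tags/revisions): given two tags pointing at the same
# revision, the highest semver match is returned, and None if the revision has no tag:
#   find_version([(Version.parse("1.0.0"), "abc123"),
#                 (Version.parse("1.2.0"), "abc123")], "abc123") -> Version 1.2.0
#   find_version([(Version.parse("1.0.0"), "abc123")], "fff000") -> None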
class DependencyResolver(object):
def __init__(self, enzi: Enzi):
self.table: typing.MutableMapping[str,
Dependency] = {} # <K=str, Dependency>
# self.decisions: typing.MutableMapping[str, int] = {} # <K=str, int>
# a cache for dep name <-> dep git url, use for name/git url conflicts
self.git_urls: typing.MutableMapping[str, str] = {}
self.enzi = enzi
def resolve(self) -> Locked:
self.register_dep_in_config(
self.enzi.config.dependencies, self.enzi.config)
iteration = 0
any_change = True
while any_change:
logger.debug('resolve: iteration {}, table {}'.format(
iteration, DepTableDumper(self.table)))
iteration += 1
self.init()
self.mark()
any_change = self.pick()
self.close()
logger.debug('resolve: resolved after {} iterations'.format(iteration))
logger.debug('resolve: resolved table {}'.format(
DepTableDumper(self.table)))
enzi = self.enzi
locked = {}
for name, dep in self.table.items():
dep_config: EnziConfig = dep.config
deps: typing.Set[str] = set(dep_config.dependencies.keys())
src: DependencySource = dep.source()
enzi_src = enzi.dependency_source(src.id)
git_url = ''
if enzi_src.is_git():
git_url = enzi_src.git_url
else:
raise ValueError('INTERNAL ERROR: unreachable')
pick = src.state.pick()
if pick is None:
logger.error('resolver: pick is none')
raise ValueError('pick is none')
rev = src.versions.revisions[pick]
version = find_version(src.versions.versions, rev)
lock_dep = config.LockedDependency(
revision=rev,
version=version,
source=config.LockedSource(git_url),
dependencies=deps
)
locked[name] = lock_dep
return config.Locked(
dependencies=locked,
config_path=self.enzi.config_path,
config_mtime=self.enzi.config_mtime)
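# Summary of the fixpoint loop in resolve() above:
#   init()  - give every open dependency source the full set of candidate revision indices
#   mark()  - gather version/revision constraints from all known enzi configs and impose them
#   pick()  - choose the lowest still-allowed index, resetting picks that became invalid
#   close() - load the enzi config of each picked dependency, which may introduce new constraints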
def init(self):
for dep in self.table.values():
for src in dep.sources.values():
if not src.state.is_open():
continue
logger.debug('resolve init {}[{}]'.format(dep.name, src.id))
ids = set(range(len(src.versions.revisions)))
src.state = State.Constrained(ids)
def mark(self):
def inner_dep(econfig: EnziConfig):
pkg_name = econfig.package['name']
return map(lambda item: (item[0], (pkg_name, item[1])), econfig.dependencies.items())
other_econf = filter(lambda x: x, map(
lambda dep: dep.config, self.table.values()))
econfig_iter = itertools.chain([self.enzi.config, ], other_econf)
flat_dep = flat_map(inner_dep, econfig_iter)
flat_dep = list(flat_dep)
dep_iter = map(lambda dep: (dep[0], dep[1][0], dep[1][1]), flat_dep)
cons_map = {} # <K=str, V=list[(str, DependencyConstraint)]>
for name, pkg_name, dep in dep_iter:
if not name in cons_map:
cons_map[name] = []
v = cons_map[name]
v.append((pkg_name, DependencyConstraint.From(dep)))
logger.debug("resolve: gathered constraints {}".format(
dump_cons_map(cons_map)))
self.table, table = {}, self.table
for name, cons in cons_map.items():
for _, con in cons:
logger.debug("resolve: impose {} on {}".format(con, name))
for src in table[name].sources.values():
self.impose(name, con, src, cons)
self.table = table
def pick(self):
any_change = False
open_pending = set()
for dep in self.table.values():
for src_id, src in dep.sources.items():
state: State = src.state
if state.is_open():
raise RuntimeError(
'INTERNAL ERROR: unreachable, state = Open')
elif state.is_locked():
pass
elif state.is_constrained():
ids = state.ids
any_change = True
logger.debug(
'resolve:pick: picking version for {}[{}]'.format(dep.name, src.id.id))
pick_id = min(ids)
dep.sources[src_id].state = State.Pick(pick_id, ids)
elif state.is_pick():
pick_id, ids = state.value
if not pick_id in ids:
logger.debug('resolve:pick: picked version for {}[{}] no longer valid, resetting'.format(
dep.name, src.id))
if dep.config:
open_pending.update(dep.config.dependencies.keys())
any_change = True
src.state = State.Open()
while open_pending:
opens, open_pending = open_pending, set()
for dep_name in opens:
logger.debug('resolve:pick: resetting {}'.format(dep_name))
dep = self.table[dep_name]
for src in dep.sources.values():
if not src.state.is_open():
any_change = True
if dep.config:
open_pending.update(dep.config.dependencies.keys())
src.state = State.Open()
return any_change
def close(self):
logger.debug('resolve:close: computing closure over dependencies')
enzi_io = EnziIO(self.enzi)
econfigs: typing.List[typing.Tuple[str, EnziConfig]] = []
for dep in self.table.values():
src: DependencySource = dep.source()
version = src.current_pick()
if not version:
continue
econfig = enzi_io.dep_config_version(src.id, version)
econfigs.append((dep.name, econfig))
for name, econfig in econfigs:
if econfig:
logger.debug('resolve:close: for {} load enzi configuration {}'
.format(name, econfig.debug_str()))
self.register_dep_in_config(econfig.dependencies, econfig)
self.table[name].config = econfig
def req_indices(self, name: str, con: DependencyConstraint, src: DependencySource):
if con.is_version():
git_ver = src.versions
con: GitVersions = con.value
ids = dict(map(lambda eitem: (eitem[1], eitem[0]),
enumerate(git_ver.revisions)))
# logger.debug(ids)
def try_match_ver(item):
v, h = item
if con.matches(v):
return ids[h]
else:
return None
revs = set(filter(lambda x: (not x is None), map(
try_match_ver, git_ver.versions)))
return revs
elif con.is_revision():
git_ver = src.versions
git_refs: dict = git_ver.refs
git_revs: list = git_ver.revisions
con: str = con.value
revs = set()
ref = git_refs.get(con, None)
if ref:
idx = git_revs.index(ref)
revs.add(idx)
else:
enum_revs = enumerate(git_revs)
revs_map = map(
lambda item: item[0] if item[1].startswith(con) else None,
enum_revs
)
revs_filter = filter(lambda x: x, revs_map)
revs = set(revs_filter)
return revs
else:
raise RuntimeError("INTERNAL ERROR")
def impose(self, name: str, con: DependencyConstraint, src: DependencySource, all_cons: list):
indices = self.req_indices(name, con, src)
if not indices:
raise RuntimeError(
'Dependency {} from {} cannot satisfy requirement {}'.format(
name,
self.enzi.dependecy(src.id).source.git_url,
str(con)
))
def extract_id(state: State):
if state.is_open():
raise RuntimeError('INTERNAL ERROR: unreachable, state = Open')
elif state.is_locked():
raise RuntimeError('INTERNAL ERROR: unreachable')
elif state.is_constrained() or state.is_pick():
ids = state.ids
is_ids = ids.intersection(indices)
if not is_ids:
msg_buf = ["Requirement {} conflicts with other requirement on dependency | |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32, Bool
from geometry_msgs.msg import PoseStamped, Pose, TwistStamped
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
import os
import sys
import math
import numpy as np
# This calibration parameter debounces the light state
# received from the camera, such that toggling between
# different states is avoided in case the tl_classifier
# is not sure
STATE_COUNT_THRESHOLD = 5
# This calibration parameter decides if images are saved
# to the Linux file system. This may sacrifice some computational
# power in favour of having the images for later analysis.
SAVE_CAMERA_IMAGES_IS_ACTIVE = False
# This calibration parameter decides if the traffic
# light classifier is used or the state of the traffic light
# is taken from the simulator. Turn this to True only when
# using the code in the simulator!
USE_TRAFFIC_LIGHT_STATE_FROM_SIMULATOR = False
# This calibration parameter sets the rate for
# processing images and detecting traffic lights.
# It should be chosen by answering the question: how fast
# do images change and traffic lights disappear?
# Unit is Hz
TRAFFIC_LIGHT_DETECTION_UPDATE_FREQUENCY = 2
# This calibration parameter allows tuning the threshold in meters for paying
# attention to the state of a traffic light. Below that threshold, camera images
# are processed; above it, they are not.
SAFE_DISTANCE_TO_TRAFFIC_LIGHT = 60 #80
SAFE_DISTANCE_TO_STOP_LINE = 40 # 60
# Distance to start decelerating. This distance is the threshold in meters for
# starting to slow down the vehicle. This parameter is related to the definition
# of the functions that define the reference velocity, thus, when modifying it, the
# MID_POINT parameter in waypoint_updater must be modified as well. This distance is
# measured from the
DISTANCE_START_DECELERATING = 120 # 180
# State machine parameters
NUM_PREV_STATES = 5 # Number of previous states to be saved
CRUISE = 1
DECELERATING = 2
STOPPED = 3
SPEEDING_UP = 4
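# Sketch of the traffic-light state machine driven in loop() below (transitions summarized
# from the code in this file):
#   CRUISE       --(stop-line waypoint reported)--> DECELERATING
#   DECELERATING --(>= 5 green detections)--------> SPEEDING_UP
#   DECELERATING --(|velocity| <= 0.5)------------> STOPPED
#   STOPPED      --(>= 5 green detections)--------> SPEEDING_UP
#   SPEEDING_UP  --(vehicle beyond the light)-----> CRUISE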
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
self.directory_for_images = '/data/'
self.image_counter = 0
self.image_counter_red = 0
self.image_counter_yellow = 0
self.image_counter_green = 0
self.tl_prev_states = [-1]*NUM_PREV_STATES # Save the last five states
self.car_state = CRUISE
self.current_vel = None
self.counter_stopped = 0
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.stop_line_positions = self.config['stop_line_positions']
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.distance_to_traffic_light_pub = rospy.Publisher('/distance_to_traffic_light', Int32, queue_size=1)
self.distance_to_stop_line_pub = rospy.Publisher('/distance_to_stop_line', Int32, queue_size=1)
self.stopped_time_pub = rospy.Publisher('/stopped_time', Int32, queue_size=1)#JUST FOR DEBUGGING
self.close_to_tl_pub = rospy.Publisher('/close_to_tl', Bool, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
try:
self.light_classifier.load_model("light_classification/tl_classifier_mobilenet.h5")
except ValueError:
print("Cannot find classification model. Check if it exists.")
global USE_TRAFFIC_LIGHT_STATE_FROM_SIMULATOR
USE_TRAFFIC_LIGHT_STATE_FROM_SIMULATOR = True
rospy.loginfo("tl_detector: Since no classification model can be found, set USE_TRAFFIC_LIGHT_STATE_FROM_SIMULATOR to True")
self.listener = tf.TransformListener()
self.loop() #rospy.spin()
def loop(self):
"""
This member function manages all threads inside the tl_detector node and
makes the execution deterministic.
"""
rate = rospy.Rate(TRAFFIC_LIGHT_DETECTION_UPDATE_FREQUENCY)
while not rospy.is_shutdown():
if not None in (self.waypoints, self.pose, self.camera_image):
light_wp, state,close_to_tl = self.process_traffic_lights()
output_light_wp = light_wp
prev_state = self.car_state
#rospy.loginfo('light_wp',light_wp,'prev_state',self.state)
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
''' State machine '''
self.tl_prev_states.pop(0)
self.tl_prev_states.append(state)
# Counts the number of red light detections in the last NUM_PREV_STATES
count_red = self.tl_prev_states.count(TrafficLight.RED)
# Counts the number of green light detections in the last NUM_PREV_STATES
count_green = self.tl_prev_states.count(TrafficLight.GREEN)
pub_light_wp = -1
if (self.car_state == CRUISE) and (light_wp>=0):
'''If it is in cruise and a stop-line waypoint for an upcoming red light
is reported, our car starts decelerating'''
self.car_state = DECELERATING
self.last_wp = light_wp
pub_light_wp = light_wp
rospy.loginfo("tl_detector: DECELERATING")
if (self.car_state == DECELERATING):
pub_light_wp = self.last_wp
#if (light_wp==-1):
# self.car_state = SPEEDING_UP
# pub_light_wp = -1
#if close_to_tl:
#if (count_red>=3):
# ''' If it is decelerating and detects red light, it updates the last_wp
# in order to stop in the stop line'''
# self.last_wp = light_wp
# pub_light_wp = light_wp
if (count_green>=5):
''' If it is decelerating but detects green light, it continues in
cruise'''
self.car_state = SPEEDING_UP
pub_light_wp = -1
rospy.loginfo("tl_detector: SPEEDING_UP")
if (abs(self.current_vel)<=0.5):
''' If it is decelerating and the velocity is lower than specified it
goes to stopped state'''
self.car_state = STOPPED
rospy.loginfo("tl_detector: STOPPED")
if (self.car_state == STOPPED):
pub_light_wp = self.last_wp
stopped_time = self.counter_stopped/TRAFFIC_LIGHT_DETECTION_UPDATE_FREQUENCY
self.stopped_time_pub.publish(stopped_time)
if (count_green>=5):# or stopped_time>30:
''' If it is stopped and our traffic light turns on green, it changes
to speeding up'''
self.car_state = SPEEDING_UP
pub_light_wp = -1
rospy.loginfo("tl_detector: SPEEDING_UP")
self.counter_stopped = 0
self.counter_stopped = self.counter_stopped + 1
if (self.car_state == SPEEDING_UP):
pub_light_wp = -1
if self.beyond_tl():
'''If it is beyond the traffic light, it goes to cruise state'''
self.car_state = CRUISE
self.tl_prev_states = [-1]*NUM_PREV_STATES
rospy.loginfo("tl_detector: CRUISE")
#rospy.loginfo('prev_state %s'%prev_state+' state %s'%self.car_state+' prev_light_wp %s'%output_light_wp+' pub_light_wp %s'%pub_light_wp)
#rospy.loginfo('light_wp',light_wp,'prev_state',self.state)
self.upcoming_red_light_pub.publish(Int32(pub_light_wp))
self.close_to_tl_pub.publish(close_to_tl)
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
#else:
# rospy.loginfo("tl_detector: Missing information, traffic light detection aborted.")
'''
rate.sleep()
def pose_cb(self, msg):
"""
This member function is called when pose is published in order to keep
the current pose as a member variable.
"""
self.pose = msg
def waypoints_cb(self, waypoints):
"""
This member function is called when waypoints is published in order to keep
the waypoints as a member variable.
"""
self.waypoints = waypoints
number_of_waypoints = len(self.waypoints.waypoints)
#rospy.loginfo("tl_detector: Catched %d waypoints", number_of_waypoints)
def traffic_cb(self, msg):
"""
This member function is called when the state of the traffic lights are published in order to keep
is as a member variable.
"""
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
#rospy.loginfo("tl_detector: Catched an image.")
self.has_image = True
self.camera_image = msg
if SAVE_CAMERA_IMAGES_IS_ACTIVE:
self.save_image(msg)
def save_image(self, img):
"""
This member function catches images and saves them to disc.
Arguments:
img: The image from the simulator.
"""
curr_dir = os.path.dirname(os.path.realpath(__file__))
img.encoding = "rgb8"
cv_image = CvBridge().imgmsg_to_cv2(img, "bgr8")
# pred_img = self.light_classifier.preprocess_image(img=cv_image)
pred_img = cv2.resize(cv_image, (224,224))
# pred_img = np.array(img).astype('float32')/255
# pred_img = np.expand_dims(img, axis=0)
file_name = curr_dir + self.directory_for_images+ 'none/img_'+'%06d'% self.image_counter +'.png'
self.image_counter += 1
distance = lambda a,b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
if not None in (self.waypoints, self.pose):
vehicle_index = self.get_index_of_closest_waypoint_to_current_pose(self.pose.pose.position)
vehicle_position = self.waypoints.waypoints[vehicle_index].pose.pose.position
traffic_light_index = self.get_index_of_closest_traffic_light_to_current_pose(vehicle_position)
if traffic_light_index >= 0:
traffic_light_waypoint_index = self.get_index_of_closest_waypoint_to_current_pose(self.lights[traffic_light_index].pose.pose.position)
traffic_light_position = self.waypoints.waypoints[traffic_light_waypoint_index].pose.pose.position
if traffic_light_waypoint_index > vehicle_index:
distance_to_traffic_light = distance(vehicle_position, traffic_light_position)
if distance_to_traffic_light < SAFE_DISTANCE_TO_TRAFFIC_LIGHT * 2 and distance_to_traffic_light > 15:
traffic_light_state = self.lights[traffic_light_index].state
if traffic_light_state == TrafficLight.RED:
file_name = curr_dir + self.directory_for_images+ 'red/img_'+'%06d'% self.image_counter_red +'.png'
self.image_counter_red += 1
self.image_counter -= 1
cv2.imwrite(file_name, pred_img)
elif traffic_light_state == TrafficLight.YELLOW:
file_name = curr_dir + self.directory_for_images+ 'yellow/img_'+'%06d'% self.image_counter_yellow +'.png'
self.image_counter_yellow += 1
self.image_counter -= 1
cv2.imwrite(file_name, pred_img)
elif traffic_light_state == TrafficLight.GREEN:
file_name = curr_dir + self.directory_for_images+ 'green/img_'+'%06d'% self.image_counter_green +'.png'
self.image_counter_green += 1
self.image_counter -= 1
cv2.imwrite(file_name, pred_img)
if self.image_counter % 4 == 0:
cv2.imwrite(file_name, pred_img)
# self.image_counter += 1
# rospy.loginfo("tl_detector.py: Camera image saved to %s!", file_name)
def get_light_state(self, light):
"""
This member function determines the current color of the traffic light.
Arguments:
light (TrafficLight): light to classify
Returns:
int: | |
= []
local_var_files = {}
if 'custom_signature_id' in params:
form_params.append(('custom_signature_id', params['custom_signature_id']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CustomSignatureDefinition',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete(self, id, **kwargs):
"""
Delete a(n) Custom Signature Definition
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Custom Signature Definition ID (required)
:return: Meta
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_with_http_info(id, **kwargs)
else:
(data) = self.delete_with_http_info(id, **kwargs)
return data
def delete_with_http_info(self, id, **kwargs):
"""
Delete a(n) Custom Signature Definition
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Custom Signature Definition ID (required)
:return: Meta
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete`")
collection_formats = {}
resource_path = '/api/v2/custom_signature_definitions/{id}.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Meta',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list(self, **kwargs):
"""
Get a list of Custom Signature Definitions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str include: Related objects that can be included in the response: custom_signature, results See Including Objects for more information.
:param dict(str, str) filter: Filter Params for Searching. Equality Searchable Attributes: [id, language, status, version_number] Searchable Association: [custom_signature] See Searching Lists for more information. See the filter parameter of the association's list action to see what attributes are searchable on each association. See Conditions on Relationships in Searching Lists for more information.
:param str page: Page Number and Page Size. Number is the page number of the collection to return, size is the number of items to return per page.
:return: PaginatedCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_with_http_info(**kwargs)
else:
(data) = self.list_with_http_info(**kwargs)
return data
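    # Hypothetical usage sketch (the class name and filter key below are assumed for
    # illustration, not taken from this file): with a configured api_client,
    #   api = CustomSignatureDefinitionsApi(api_client)
    #   page = api.list(filter={'language_eq': 'ruby'}, include='custom_signature')
    # returns a PaginatedCollection synchronously; passing callback=... instead
    # returns the request thread, as described in the docstring above.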
def list_with_http_info(self, **kwargs):
"""
Get a list of Custom Signature Definitions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str include: Related objects that can be included in the response: custom_signature, results See Including Objects for more information.
:param dict(str, str) filter: Filter Params for Searching. Equality Searchable Attributes: [id, language, status, version_number] Searchable Association: [custom_signature] See Searching Lists for more information. See the filter parameter of the association's list action to see what attributes are searchable on each association. See Conditions on Relationships in Searching Lists for more information.
:param str page: Page Number and Page Size. Number is the page number of the collection to return, size is the number of items to return per page.
:return: PaginatedCollection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['include', 'filter', 'page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/api/v2/custom_signature_definitions.json_api'.replace('{format}', 'json_api')
path_params = {}
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
if 'filter' in params:
form_params.append(('filter', params['filter']))
if 'page' in params:
form_params.append(('page', params['page']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaginatedCollection',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def show(self, id, **kwargs):
"""
Show a single Custom Signature Definition
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Custom Signature Definition ID (required)
:param str include: Related objects that can be included in the response: custom_signature, results See Including Objects for more information.
:return: CustomSignatureDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.show_with_http_info(id, **kwargs)
else:
(data) = self.show_with_http_info(id, **kwargs)
return data
def show_with_http_info(self, id, **kwargs):
"""
Show a single Custom Signature Definition
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Custom Signature Definition ID (required)
:param str include: Related objects that can be included in the response: custom_signature, results See Including Objects for more information.
:return: CustomSignatureDefinition
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'include']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method show" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `show`")
collection_formats = {}
resource_path = '/api/v2/custom_signature_definitions/{id}.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CustomSignatureDefinition',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, id, **kwargs):
"""
Update a(n) Custom Signature Definition
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Custom Signature Definition ID (required)
:param str include: Related | |
{'Type': 'Digital', 'x': 1189, 'y': 1752, 'width': 1032}, # Bottom hat down
'Joy_7': {'Type': 'Digital', 'x': 1189, 'y': 1808, 'width': 1032}, # Bottom hat left
'Joy_8': {'Type': 'Digital', 'x': 1189, 'y': 1640, 'width': 1032}, # Bottom hat up
'Joy_9': {'Type': 'Digital', 'x': 1198, 'y': 1275, 'width': 832}, # Rear hat down
'Joy_10': {'Type': 'Digital', 'x': 1198, 'y': 1219, 'width': 832}, # Rear hat right
'Joy_11': {'Type': 'Digital', 'x': 1198, 'y': 1163, 'width': 832}, # Rear hat up
'Joy_12': {'Type': 'Digital', 'x': 1198, 'y': 1331, 'width': 832}, # Rear hat left
'Joy_13': {'Type': 'Digital', 'x': 1261, 'y': 1549, 'width': 932}, # Mouse button
'Joy_14': {'Type': 'Digital', 'x': 1042, 'y': 814, 'width': 892}, # Right back
'Joy_15': {'Type': 'Digital', 'x': 904, 'y': 724, 'width': 892}, # Middle back
'Joy_16': {'Type': 'Digital', 'x': 684, 'y': 634, 'width': 892}, # Left back
'Joy_POV1Up': {'Type': 'Digital', 'x': 132, 'y': 1896, 'width': 932}, # PoV hat up (looks like witch hat)
'Joy_POV1Right': {'Type': 'Digital', 'x': 132, 'y': 1952, 'width': 932}, # PoV hat right (looks like witch hat)
'Joy_POV1Down': {'Type': 'Digital', 'x': 132, 'y': 2008, 'width': 932}, # PoV hat down (looks like witch hat)
'Joy_POV1Left': {'Type': 'Digital', 'x': 132, 'y': 1840, 'width': 932}, # PoV hat left (looks like witch hat)
'Joy_XAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1493, 'width': 932}, # Mouse X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1437, 'width': 932}, # Mouse Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 80, 'y': 714, 'width': 692}, # Throttle
},
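    # Note on the mapping format (inferred from the entries themselves): each control
    # name maps to its label type ('Digital' or 'Analogue') plus the x/y pixel
    # position and text width used to place that label on the device template image.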
'CHProThrottle2': { # CH Pro Throttle with CH manager
'Joy_1': {'Type': 'Digital', 'x': 1261, 'y': 1549, 'width': 932}, # Mouse button
'Joy_2': {'Type': 'Digital', 'x': 1042, 'y': 814, 'width': 892}, # Right back
'Joy_3': {'Type': 'Digital', 'x': 904, 'y': 724, 'width': 892}, # Middle back
'Joy_4': {'Type': 'Digital', 'x': 684, 'y': 634, 'width': 892}, # Left back
'Joy_5': {'Type': 'Digital', 'x': 1198, 'y': 1275, 'width': 832}, # Rear hat down
'Joy_6': {'Type': 'Digital', 'x': 1198, 'y': 1219, 'width': 832}, # Rear hat right
'Joy_7': {'Type': 'Digital', 'x': 1198, 'y': 1163, 'width': 832}, # Rear hat up
'Joy_8': {'Type': 'Digital', 'x': 1198, 'y': 1331, 'width': 832}, # Rear hat left
'Joy_9': {'Type': 'Digital', 'x': 1114, 'y': 966, 'width': 832}, # Castle hat right
'Joy_10': {'Type': 'Digital', 'x': 1114, 'y': 1022, 'width': 832}, # Castle hat down
'Joy_11': {'Type': 'Digital', 'x': 1114, 'y': 1078, 'width': 832}, # Castle hat left
'Joy_12': {'Type': 'Digital', 'x': 1114, 'y': 910, 'width': 832}, # Castle hat up
'Joy_13': {'Type': 'Digital', 'x': 1189, 'y': 1696, 'width': 1032}, # Bottom hat right
'Joy_14': {'Type': 'Digital', 'x': 1189, 'y': 1752, 'width': 1032}, # Bottom hat down
'Joy_15': {'Type': 'Digital', 'x': 1189, 'y': 1808, 'width': 1032}, # Bottom hat left
'Joy_16': {'Type': 'Digital', 'x': 1189, 'y': 1640, 'width': 1032}, # Bottom hat up
'Joy_POV1Up': {'Type': 'Digital', 'x': 132, 'y': 1896, 'width': 932}, # PoV hat up (looks like witch hat)
'Joy_POV1Right': {'Type': 'Digital', 'x': 132, 'y': 1952, 'width': 932}, # PoV hat right (looks like witch hat)
'Joy_POV1Down': {'Type': 'Digital', 'x': 132, 'y': 2008, 'width': 932}, # PoV hat down (looks like witch hat)
'Joy_POV1Left': {'Type': 'Digital', 'x': 132, 'y': 1840, 'width': 932}, # PoV hat left (looks like witch hat)
'Joy_XAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1493, 'width': 932}, # Mouse X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1437, 'width': 932}, # Mouse Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 80, 'y': 714, 'width': 692}, # Throttle
},
'068EC010': { # CH Pro Throttle with CH manager, alternate
'displayName': 'CH Pro Throttle with CH manager, alternate',
'Joy_1': {'Type': 'Digital', 'x': 1261, 'y': 1549, 'width': 932}, # Mouse button
'Joy_2': {'Type': 'Digital', 'x': 1042, 'y': 814, 'width': 892}, # Right back
'Joy_3': {'Type': 'Digital', 'x': 904, 'y': 724, 'width': 892}, # Middle back
'Joy_4': {'Type': 'Digital', 'x': 684, 'y': 634, 'width': 892}, # Left back
'Joy_5': {'Type': 'Digital', 'x': 1198, 'y': 1275, 'width': 832}, # Rear hat down
'Joy_6': {'Type': 'Digital', 'x': 1198, 'y': 1219, 'width': 832}, # Rear hat right
'Joy_7': {'Type': 'Digital', 'x': 1198, 'y': 1163, 'width': 832}, # Rear hat up
'Joy_8': {'Type': 'Digital', 'x': 1198, 'y': 1331, 'width': 832}, # Rear hat left
'Joy_9': {'Type': 'Digital', 'x': 1114, 'y': 966, 'width': 832}, # Castle hat right
'Joy_10': {'Type': 'Digital', 'x': 1114, 'y': 1022, 'width': 832}, # Castle hat down
'Joy_11': {'Type': 'Digital', 'x': 1114, 'y': 1078, 'width': 832}, # Castle hat left
'Joy_12': {'Type': 'Digital', 'x': 1114, 'y': 910, 'width': 832}, # Castle hat up
'Joy_13': {'Type': 'Digital', 'x': 1189, 'y': 1696, 'width': 1032}, # Bottom hat right
'Joy_14': {'Type': 'Digital', 'x': 1189, 'y': 1752, 'width': 1032}, # Bottom hat down
'Joy_15': {'Type': 'Digital', 'x': 1189, 'y': 1808, 'width': 1032}, # Bottom hat left
'Joy_16': {'Type': 'Digital', 'x': 1189, 'y': 1640, 'width': 1032}, # Bottom hat up
'Joy_POV1Up': {'Type': 'Digital', 'x': 132, 'y': 1896, 'width': 932}, # PoV hat up (looks like witch hat)
'Joy_POV1Right': {'Type': 'Digital', 'x': 132, 'y': 1952, 'width': 932}, # PoV hat right (looks like witch hat)
'Joy_POV1Down': {'Type': 'Digital', 'x': 132, 'y': 2008, 'width': 932}, # PoV hat down (looks like witch hat)
'Joy_POV1Left': {'Type': 'Digital', 'x': 132, 'y': 1840, 'width': 932}, # PoV hat left (looks like witch hat)
'Joy_XAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1493, 'width': 932}, # Mouse X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 1261, 'y': 1437, 'width': 932}, # Mouse Y axis
'Joy_ZAxis': {'Type': 'Analogue', 'x': 80, 'y': 714, 'width': 692}, # Throttle
},
'068E00FA': { # CH Quadrant throttle
'displayName': 'CH Quadrant throttle',
'Joy_XAxis': {'Type': 'Analogue', 'x': 354, 'y': 804, 'width': 932}, # Throttle 1
'Joy_YAxis': {'Type': 'Analogue', 'x': 554, 'y': 564, 'width': 932}, # Throttle 2
'Joy_ZAxis': {'Type': 'Analogue', 'x': 754, 'y': 324, 'width': 932}, # Throttle 3
'Joy_RXAxis': {'Type': 'Analogue', 'x': 2214, 'y': 324, 'width': 932}, # Throttle 4
'Joy_RYAxis': {'Type': 'Analogue', 'x': 2414, 'y': 564, 'width': 932}, # Throttle 5
'Joy_RZAxis': {'Type': 'Analogue', 'x': 2614, 'y': 804, 'width': 932}, # Throttle 6
'Joy_1': {'Type': 'Digital', 'x': 354, 'y': 1484, 'width': 932}, # Switch 1 up
'Joy_2': {'Type': 'Digital', 'x': 354, 'y': 1540, 'width': 932}, # Switch 1 down
'Joy_3': {'Type': 'Digital', 'x': 554, 'y': 1724, 'width': 932}, # Switch 2 up
'Joy_4': {'Type': 'Digital', 'x': 554, 'y': 1780, 'width': 932}, # Switch 2 down
'Joy_5': {'Type': 'Digital', 'x': 754, 'y': 1964, 'width': 932}, # Switch 3 up
'Joy_6': {'Type': 'Digital', 'x': 754, 'y': 2020, 'width': 932}, # Switch 3 down
'Joy_7': {'Type': 'Digital', 'x': 2214, 'y': 1964, 'width': 932}, # Switch 4 up
'Joy_8': {'Type': 'Digital', 'x': 2214, 'y': 2020, 'width': 932}, # Switch 4 down
'Joy_9': {'Type': 'Digital', 'x': 2414, 'y': 1724, 'width': 932}, # Switch 5 up
'Joy_10': {'Type': 'Digital', 'x': 2414, 'y': 1780, 'width': 932}, # Switch 5 down
'Joy_11': {'Type': 'Digital', 'x': 2614, 'y': 1484, 'width': 932}, # Switch 6 up
'Joy_12': {'Type': 'Digital', 'x': 2614, 'y': 1540, 'width': 932}, # Switch 6 down
},
'06A30C2D': { # CH Pro Flight Quadrant throttle
'displayName': 'CH Pro Flight Quadrant throttle',
'Joy_XAxis': {'Type': 'Analogue', 'x': 554, 'y': 564, 'width': 932}, # Throttle 1
'Joy_YAxis': {'Type': 'Analogue', 'x': 1474, 'y': 324, 'width': 932}, # Throttle 2
'Joy_ZAxis': {'Type': 'Analogue', 'x': 2414, 'y': 564, 'width': 932}, # Throttle 3
'Joy_1': {'Type': 'Digital', 'x': 354, 'y': 1484, 'width': 932}, # Switch 1 up
'Joy_2': {'Type': 'Digital', 'x': 354, 'y': 1540, 'width': 932}, # Switch 1 down
'Joy_3': {'Type': 'Digital', 'x': 1474, 'y': 1754, 'width': 932}, # Switch 2 up
'Joy_4': {'Type': 'Digital', 'x': 1474, 'y': 1810, 'width': 932}, # Switch 2 down
'Joy_5': {'Type': 'Digital', 'x': 2614, 'y': 1484, 'width': 932}, # Switch 3 up
'Joy_6': {'Type': 'Digital', 'x': 2614, 'y': 1540, 'width': 932}, # Switch 3 down
},
'ThrustMasterHOTAS4': {
'Joy_1': {'Type': 'Digital', 'x': 1394, 'y': 614, 'width': 1082}, # Stick primary trigger
'Joy_2': {'Type': 'Digital', 'x': 2114, 'y': | |
{}
self.point_db = {}
self.gs = None
self._ref_gs = GeometrySearch(GEO_FILL_LENGTH, use_dist=True, perform_self_testing=perform_self_testing)
self._ref_way_db = {}
self._way_db_iter = None
self._way_db_sub_iter = None
self._perform_self_testing = perform_self_testing
# Expected properties of 'reference_geometry':
#
# - All points in a ways that should be connected are close (within SNAP_POINT_DISTANCE)
# from a point in the connecting way, which is an endpoint
# - No overlaps or duplicates
# - There may be very short segments (cleaned up)
# - Segments with the same RLID may sometimes be disconnected
# - There may be closed ways (start and end point the same), but no self-crossing ways
#
# After processing:
# - Any points closer than SNAP_POINT_DISTANCE have been merged to a single point
        # - Segments with the same RLID have been connected into segments that are as long as possible
# - Store in a GeometrySearch object with distance on each point
#
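        # Illustrative example of the snapping step (made-up coordinates): endpoints
        # at (100.000, 200.000) and (100.004, 200.001) lie within the snap distance,
        # so both ways end up sharing one identical Point and become connectable.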
# group segments per RLID, and register all endpoints and midpoints (=non-endpoints) for searching
rlid_ways = {}
endpoints = TwoDimSearch()
point_count = 0
last_print = 0
_log.info("Setting up endpoint 2D search data structures...")
for idx, way in enumerate(reference_geometry):
last_print = print_progress(last_print, idx, len(reference_geometry),
progress_text="endpoint 2D search data structures")
for ep in (way.way[0], way.way[-1]):
endpoints.insert(ep, way)
point_count += len(way.way)
if way.rlid in rlid_ways:
rlid_ways[way.rlid].append(way)
else:
rlid_ways[way.rlid] = [way]
_log.info(f"({len(endpoints)} endpoints of {point_count} total points)")
_log.info("Snap points to nearby endpoints.")
# Due to snapping we may introduce duplicate points in the ways, which is ok as we
# remove them later.
ep_count, mp_count = self._snap_points_to_nearby_endpoints(rlid_ways, endpoints)
_log.info(f"done (snapped {ep_count} endpoints and {mp_count} midpoints)")
# In rare cases DKReflinjetillkomst has lines cut short, we try to connect those
_log.info("Snap still unconnected endpoints to nearby points by extension...")
ep_count = 0
uc_count = 0
second_pass = []
midpoints = TwoDimSearch()
for ways in rlid_ways.values():
for way in ways:
for mp in way.way[1:-1]:
midpoints.insert(mp, way)
for ways in rlid_ways.values():
for way in ways:
if way.way[0] == way.way[-1]:
continue # makes no sense to extend-snap closed loops
for way_idx in [ 0, -1 ]:
if len(endpoints[way.way[way_idx]]) > 1:
# already connected
continue
uc_count += 1
min_ext_dist, min_dev_dist, p = extend_and_snap(way.way, way_idx == 0, endpoints, midpoints, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
if p is None:
continue
if min_dev_dist > 1e-5:
# keep very tight limit on first pass so we extend in the right order
second_pass.append(way)
continue
_log.info(f"extend snap ext:{min_ext_dist:g} dev:{min_dev_dist:g} for RLID {way.rlid}"
f" at {latlon_str(p)}")
endpoints.remove(way.way[way_idx], way) # must be removed before midpoint test below so we don't snap to ourself
if p in midpoints:
# for midpoints it may be better to snap to an endpoint instead
p = self._snap_to_nearby_point(p, endpoints, self.MAX_SNAP_DISTANCE)
endpoints.insert(p, way)
way.way[way_idx] = Point(p.x, p.y)
ep_count += 1
for way in second_pass:
for way_idx in [ 0, -1 ]:
if len(endpoints[way.way[way_idx]]) > 1:
continue
min_ext_dist, min_dev_dist, p = extend_and_snap(way.way, way_idx == 0, endpoints, midpoints, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
if p is None:
continue
_log.info(f"extend snap ext:{min_ext_dist:g} dev:{min_dev_dist:g} for RLID {way.rlid} at {latlon_str(p)}")
endpoints.remove(way.way[way_idx], way)
if p in midpoints:
p = self._snap_to_nearby_point(p, endpoints, self.MAX_SNAP_DISTANCE)
endpoints.insert(p, way)
way.way[way_idx] = Point(p.x, p.y)
ep_count += 1
_log.info(f"done (snapped {ep_count} endpoints, {uc_count - ep_count} still unconnected)")
if ep_count > 0:
_log.warning("extend snaps typically means that there are gaps in the data source's geometry")
if self._perform_self_testing:
for ways in rlid_ways.values():
for way in ways:
for ep in [ way.way[0], way.way[-1] ]:
dist, new_point, _ = endpoints.find_nearest_within(ep, self.POINT_SNAP_DISTANCE, exclude_self=True)
if new_point is not None:
_log.error(f"endpoints placed too closely together: {dist}, {ep}, {new_point}")
raise RuntimeError("endpoints placed too closely together")
_log.info("Join segments with same RLID and insert to search data structure...")
self._insert_into_reference_geometry(rlid_ways, endpoints)
_log.info("done")
def __iter__(self):
self._way_db_iter = iter(self.way_db.values())
self._way_db_sub_iter = None
return self
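    # __next__ below flattens way_db (one list of ways per key) so that iterating
    # over the database yields every way exactly once.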
def __next__(self):
if self._way_db_sub_iter is None:
ways = next(self._way_db_iter) # when StopIteration is raised iteration is complete
self._way_db_sub_iter = iter(ways)
try:
way = next(self._way_db_sub_iter)
return way
except StopIteration:
self._way_db_sub_iter = None
return self.__next__()
@staticmethod
def _snap_to_nearby_point(p, snappoints, snap_distance):
_, snap, _ = snappoints.find_nearest_within(p, snap_distance, exclude_self=True)
if snap is None:
return p
return snap
def _snap_points_to_nearby_endpoints(self, rlid_ways, endpoints):
ep_count = 0
mp_count = 0
midpoints = []
prev_count = -1
pass_count = 0
while pass_count < 2 or prev_count != ep_count + mp_count:
snapped_points = set()
prev_count = ep_count + mp_count
if pass_count == 0:
                # snap really close ones first to make sure we don't make an unnecessarily long snap
snap_distance = 0.001
else:
snap_distance = self.POINT_SNAP_DISTANCE
for ways in rlid_ways.values():
for way in ways:
for way_idx in range(0, len(way.way)):
is_midpoint = way_idx not in (0, len(way.way) - 1)
ep_list = endpoints.find_all_within_list(way.way[way_idx], snap_distance)
if len(ep_list) == 0:
                            # the way's own endpoint is expected to exist in the endpoints set
assert is_midpoint
continue
if len(ep_list) == 1 and not is_midpoint:
assert ep_list[0][0] == way.way[way_idx]
continue
new_point = ep_list[0][0]
if pass_count == 0:
# first pass we can pick any point due to short snap distance
snapped_points.add(new_point)
else:
# prefer to snap to a point already snapped
for ep in ep_list:
if ep[0] in snapped_points:
new_point = ep[0]
break
# move all the nearby endpoints to the point we have chosen for snapping (new_point)
for ep in ep_list:
old_point = ep[0]
if old_point == new_point:
continue
ep_set = ep[1]
endpoints.remove_set(old_point)
for w in ep_set:
if w.way[0] == old_point:
w.way[0] = Point(new_point.x, new_point.y)
if w.way[-1] == old_point:
w.way[-1] = Point(new_point.x, new_point.y)
assert new_point in (w.way[0], w.way[-1])
ep_count += 1
endpoints.insert(new_point, w)
midpoints.append((way_idx, way))
if way.way[way_idx] != new_point:
assert is_midpoint
way.way[way_idx] = Point(new_point.x, new_point.y)
mp_count += 1
_log.debug(f"snap_counts {mp_count} {ep_count}")
pass_count += 1
# also add connected midpoints, we need to do it here afterwards to not disturb the multiple pass endpoint snapping
# FIXME: modifying contents and meaning of endpoints is a hard-to-follow side effect
for mp in midpoints:
idx = mp[0]
way = mp[1]
endpoints.insert(way.way[idx], way)
return ep_count, mp_count
def _insert_into_reference_geometry(self, rlid_ways, endpoints):
last_print = 0
for idx, ways in enumerate(rlid_ways.values()):
if len(rlid_ways) > 50:
last_print = print_progress(last_print, idx, len(rlid_ways), progress_text="Join segments")
# make longest possible ways of RLID segments
joined_ways = join_ways(ways)
joined_ways = join_DKReflinjetillkomst_gaps(joined_ways)
joined_ways = remove_DKReflinjetillkomst_overlaps(joined_ways, self.POINT_SNAP_DISTANCE)
for way in joined_ways:
# very short segments lead to problems with snapping (can cause gaps where there should not be any)
new_way = remove_short_segments_and_redundant_points(way, self.POINT_SNAP_DISTANCE, endpoints)
if len(new_way) < 2:
_log.debug(f"Skipping zero length segment for {way.rlid}")
continue
assert new_way[0] == way.way[0] and new_way[-1] == way.way[-1]
way.way = new_way
test_self_connections(way)
if self._perform_self_testing:
self._test_way_dist(way, allow_unset=True)
self._ref_gs.insert(way)
self._test_way_dist(self._ref_gs.find_reference_way(way.way[0], way.rlid))
if way.rlid in self._ref_way_db:
self._ref_way_db[way.rlid].append(way)
else:
self._ref_way_db[way.rlid] = [ way ]
def get_reference_geometry(self):
ref_ways = []
for ways in self._ref_way_db.values():
for way in ways:
ref_ways.append(way)
return ref_ways
def insert_missing_reference_geometry_if_any(self, geometry_ways):
missing_ways = {}
for way in geometry_ways:
if not way.rlid in self._ref_way_db:
wc = way.make_copy_new_way(copy_way(way.way))
if way.rlid in missing_ways:
missing_ways[way.rlid].append(wc)
else:
missing_ways[way.rlid] = [ wc ]
if len(missing_ways) == 0:
return False
# snap endpoints to self
endpoints = TwoDimSearch()
rlids = []
for ways in list(missing_ways.values()):
# this type of geometry may have overlaps, so we pre-join using NVDB tags
ways = join_ways_using_nvdb_tags(ways, self.POINT_SNAP_DISTANCE)
missing_ways[ways[0].rlid] = ways
rlids.append(ways[0].rlid)
for way in ways:
for ep in [ way.way[0], way.way[-1] ]:
endpoints.insert(ep, way)
self._snap_points_to_nearby_endpoints(missing_ways, endpoints)
self._insert_into_reference_geometry(missing_ways, endpoints)
        # Missing segments may be missing because they have been snapped to zero and thus excluded.
        # If that is the case they won't be reinserted either, so we only log after the insertion,
        # when we know whether any actually got in.
did_insert = False
for rlid in rlids:
if rlid in self._ref_way_db:
_log.warning(f"RLID {rlid} was not in reference geometry, inserted it.")
did_insert = True
return did_insert
def insert_rlid_node(self, node, data_src_name, do_snap=True):
did_snap = False
if do_snap:
dist, p, snap_way = self._ref_gs.snap_point_into_geometry(node.way, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
if p is None:
_log.warning(f"node with RLID {node.rlid} {latlon_str(node.way)} in {data_src_name} has no"
f" existing geometry within {self.MAX_SNAP_DISTANCE} meters")
else:
did_snap = True
if dist > self.POINT_SNAP_DISTANCE:
_log.info("Node %s snap distance %s", node.rlid, dist)
node.way.x = p[0]
node.way.y = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Pluralsight, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of various Maximum Likelihood Estimation methods
The various methods below fit a logistic functions to the binned
response percentiles for each questions.
The input data is for a single question and consists of theta, r, and f;
theta: a regularly spaced set of ability levels
r: the number of correct responses for each ability group in theta
f: the total number of responses for each ability group in theta
Percentages (p) are just r/f
(However, I have now refactored the code to eliminate this division, which
may help some /0 headaches and should make it easier to extend this to
possibly more generic methods in the future (perhaps a KDE-based one?))
a is the discriminatory power (slope):
larger lambda means more discrimination (closer to a step function)
b is the difficulty, the location parameter (offset along the x-axis)
c is the pseudo-guessing parameter (lower asymptote)
d is the upper asymptote (not used here)
lambda (or lam) is the same as a
zeta is the adjusted difficulty, -a * b (conversely, b = -zeta / lam)
The original equality is:
zeta + lam * theta == a * (theta - b)
Code is based on work by <NAME> and <NAME>:
Item Response Theory: Parameter Estimation Techniques
http://www.crcpress.com/product/isbn/9780824758257
The original BASIC code that this was ported from can be downloaded here:
http://www.crcpress.com/downloads/DK2939/IRTPET.zip
Some more documentation:
Main equation for partial derivative of log-likelihood dL / dx
    \\frac{\\partial L}{\\partial x} =
    \\sum r \\frac{1}{P}\\frac{\\partial P}{\\partial x} -
    \\sum (f - r) \\frac{1}{1 - P}\\frac{\\partial P}{\\partial x}
All these algorithms maximize the likelihood L by looking for zeros in dL / dx
The differences in the methods have to do with how P is defined.
This version uses the 3PL (a, b, c) formulation for everything, which
has different properties than the (zeta, lambda, c) formulation
(in general the former is less stable).
If you know nothing, use the main zlc formulation and ignore this one :)
"""
from __future__ import division
import numpy as np
from scipy.optimize import root # , minimize
from .util import (
ConvergenceError,
dev_ab,
logistic,
scale_guessing,
logistic3PLabc,
chi_squared,
reduced_chi_squared,
expand_dims,
pack_abc,
unpack_abc,
get_L_abc,
)
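# Model recap for the helpers below (matching get_derivative_L): the item
# characteristic curve is built as Pstar = logistic(dev_ab(a, b, theta)), presumably
# a * (theta - b) inside the logistic, and P = scale_guessing(Pstar, c), presumably
# c + (1 - c) * Pstar; e.g. with a=1, b=0, c=0.25 and theta=0, Pstar=0.5 and P=0.625.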
########################################################################
## Functions to compute 1st and 2nd derivatives of the likelihood
## for the 1, 2, & 3 parameter logistic models
## For consistency, all functions now use: np.nansum( [<>] , axis=1 or 2)
########################################################################
def J_1PL(theta, r, f, P, a, b, c=0, Pstar=None):
"""Get the Jacobian of the log likelihood for the 1PL model
(1st derivative wrt b)
a here is an ARRAY, not a scalar
"""
a = a * np.ones(P.shape) # force a to be an array
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
Prat = 1 if Pstar is None else Pstar / P
L2 = -a * rmfP * Prat
return np.nansum([L2], axis=1)
def H_1PL(theta, r, f, P, a, b, c=0, Pstar=None):
"""Get the Hessian Matrix of the log likelihood for the 1PL model
(2nd derivative wrt b)
"""
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
Q = 1 - P
Pstar, Prat = (P, 1) if Pstar is None else (Pstar, Pstar / P)
EL22 = -(a ** 2) * f * Prat * Pstar * Q
return np.nansum([[EL22]], axis=2)
def J_2PL(theta, r, f, P, a, b, c=0, Pstar=None):
"""Get the Jacobian of the log likelihood for the 2PL model
(1st derivatives wrt a,b)
a here is an ARRAY, not a scalar
"""
a = a * np.ones(P.shape) # force a to be an array
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
Prat = 1 if Pstar is None else Pstar / P
thmb = theta - b
L1, L2 = np.array([thmb, -a]) * rmfP * Prat
return np.nansum([L1, L2], axis=1)
def H_2PL(theta, r, f, P, a, b, c=0, Pstar=None):
"""Get the Hessian Matrix of the log likelihood for the 2PL model
(2nd derivative wrt a,b)
"""
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
Q = 1 - P
Pstar, Prat = (P, 1) if Pstar is None else (Pstar, Pstar / P)
thmb = theta - b
EL11, EL22, EL12 = np.array([thmb ** 2, -(a ** 2), a * thmb]) * f * Prat * Pstar * Q
return np.nansum([[EL11, EL12], [EL12, EL22]], axis=2)
def J_3PL(theta, r, f, P, a, b, c, Pstar):
"""Get the Jacobian of the log likelihood for the 3PL model
(1st derivatives wrt a,b,c)
a here is an ARRAY, not a scalar
"""
a = a * np.ones(P.shape) # force a to be an array
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
iPc = 1 / (P - c)
Prat = Pstar / P
thmb = theta - b
L1, L2, L3 = np.array([thmb, -a, iPc]) * rmfP * Prat
return np.nansum([L1, L2, L3], axis=1)
def H_3PL(theta, r, f, P, a, b, c, Pstar):
"""Get the Hessian Matrix of the log likelihood for the 3PL model
(2nd derivative wrt a,b,c)
"""
theta, r, f = expand_dims(P, theta, r, f)
rmfP = r - f * P
iPc = 1 / (P - c)
Q = 1 - P
Qic = Q / (1 - c)
Prat = Pstar / P
thmb = theta - b
EL11, EL22, EL33 = (
np.array([-P * Q * thmb ** 2 * Prat, -(a ** 2) * P * Q * Prat, Qic * iPc])
* f
* Prat
)
EL12, EL13, EL23 = (
np.array([a * thmb * P * Q * Prat, -thmb * Qic, a * Qic]) * f * Prat
)
return np.nansum(
[[EL11, EL12, EL13], [EL12, EL22, EL23], [EL13, EL23, EL33]], axis=2
)
########################################################################
## Compute optimal values for the fit parameters using
## maximum likelihood estimation in the 1PL, 2PL, and 3PL cases:
########################################################################
JH = {1: (J_1PL, H_1PL), 2: (J_2PL, H_2PL), 3: (J_3PL, H_3PL)}
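# JH maps the number of model parameters (1, 2 or 3) to its (Jacobian, Hessian)
# builder pair, so JH[num_params][do2nd] picks J for do2nd=False and H for
# do2nd=True; get_derivative_L below relies on exactly this indexing.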
def get_derivative_L(num_params, theta, r, f, a=None, b=None, c=None, do2nd=False):
DL = JH[num_params][do2nd]
def derivative_L(abc):
_a, _b, _c = unpack_abc(a, b, c, abc) # unpack the parameters
Pstar = logistic(dev_ab(_a, _b, theta))
P = scale_guessing(Pstar, _c)
return DL(theta, r, f, P, _a, _b, _c, Pstar)
return derivative_L
def get_JL(num_params, theta, r, f, a=None, b=None, c=None):
return get_derivative_L(num_params, theta, r, f, a, b, c, do2nd=False)
def get_HL(num_params, theta, r, f, a=None, b=None, c=None):
return get_derivative_L(num_params, theta, r, f, a, b, c, do2nd=True)
def mle_abc(
num_params,
theta,
r,
f,
a,
b,
c,
use_2nd=False,
force_convergence=True,
method=None,
return_history=False,
verbose=True,
):
"""Perform logistic ICC model parameter estimation using MLE
Based on theoretical foundations for the 3PL model in
"Item Response Theory: Parameter Estimation Techniques"
This function is capable of performing 1PL, 2PL or 3PL depending on
the value passed as "num_params"
If return_history is True, this additionally stores and returns the
history of abc values.
"""
theta, r, f = list(map(np.asanyarray, [theta, r, f])) # ensure these are arrays
count = [0]
# Get the Jacobian (1st derivatives) of the log likelihood function
# based on current values of the a, b, c parameters.
# if use_2nd=True, also return the Hessian (2nd derivatives)
J, H = JH[num_params]
if return_history:
abc_hist = []
A, B, C = a, b, c
def JL(params):
count[0] += 1
# unpack the parameters
a, b, c = unpack_abc(A, B, C, params)
Pstar = logistic(dev_ab(a, b, theta))
P = scale_guessing(Pstar, c)
if c == 0 and num_params < 3:
Pstar = None # optimize :)
JLL = J(theta, r, f, | |
# -*- coding: utf-8 -*-
from django.conf import settings
from itsdangerous import URLSafeTimedSerializer as utsr
from django.db import connection
from django.utils import timezone
from django.http import HttpResponse, QueryDict
import base64
import requests
import six
import models
import socket
import hashlib
import xmltodict
import json
import re
from datetime import datetime
def sign(params, sign_key="<KEY>"):
method = params.get('method')
params = [(u'%s' % key, u'%s' % val) for key, val in params.iteritems() if val]
params = sorted(params)
sorted_params_string = ''.join(''.join(pair) for pair in params)
    sign_str = method + sorted_params_string + sign_key
    md5 = hashlib.md5()
    md5.update(sign_str)
return md5.hexdigest().upper()
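# Worked example for sign() above (illustrative values): with
#   params = {'method': 'm', 'a': '1', 'b': '2'} and sign_key = 'K'
# the sorted pair string is 'a1b2methodm', the string that gets hashed is
# 'ma1b2methodmK', and the return value is its MD5 hex digest, uppercased.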
def get_local_ip():
myname = socket.getfqdn(socket.gethostname())
myaddr = socket.gethostbyname(myname)
return myaddr
def pay_sign(params, sign_key):
params = [(u'%s' % key, u'%s' % val) for key, val in params.iteritems() if val]
sorted_params_string = '&'.join('='.join(pair) for pair in sorted(params))
sign = '{}&key={}'.format(sorted_params_string.encode('utf-8'), sign_key)
md5 = hashlib.md5()
md5.update(sign)
return md5.hexdigest().upper()
def get_actives():
url = "https://mcp.ddky.com/weixin/rest.htm"
date = datetime.now()
date = str(date).split('.')[0]
request_data = {
'activityId':587,
'city':'beijing',
'lat':39.91488908,
'lng':116.40387397,
'method':'ddky.promotion.onebuy.new.activity.pageinfo',
'pageType':1,
'plat':'H5',
'platform':'H5',
't': date,
'v':'1.0',
'versionName':'3.5.0'
}
end_url = get_url(url, request_data)
response = requests.get(end_url)
res = None
try:
json_str = re.findall(r'[^()]+', response.content)[1]
res = json.loads(json_str)
# print res,'res ok'
except Exception as e:
res = json.loads(response.content)
return res
def xml_response_to_dict(rep):
d = xmltodict.parse(rep.content)
return dict(d['response'])
def get_phone_area(phone):
    url = 'http://i.ataohao.com/api/hw?cmd=80100031&param=%3Cnumber%3E{phone}%3C/number%3E%3Ctype%3E1%3C/type%3E'.format(
phone=phone)
res = requests.get(url=url)
data = xml_response_to_dict(res)
return data.get('locInfo')
def get_url(url, params):
p = ''
for key in params:
p += "&" + key + "=" + str(params.get(key))
sign_str = sign(params)
p = url + '?sign=' +sign_str + p
return p
def dictfetchall(cursor):
"Return all rows from a cursor as a dict"
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
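# Example for dictfetchall above: with cursor.description columns ('id', 'name') and
# a fetched row (1, 'foo'), the result is [{'id': 1, 'name': 'foo'}].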
def confirm_validate_token(token, expiration=settings.SMALL_WEIXIN_TOKEN_VALID_TIME):
serializer = utsr(settings.SECRET_KEY)
salt = base64.encodestring(settings.SECRET_KEY)
return serializer.loads(token, salt=salt, max_age=expiration)
def generate_validate_token(openid):
serializer = utsr(settings.SECRET_KEY)
salt = base64.encodestring(settings.SECRET_KEY)
return serializer.dumps(openid, salt)
def request_user(request):
if request.method == "GET":
token = request.GET.get('token')
else:
params = QueryDict(request.body)
token = params.get('token')
openid = confirm_validate_token(token)
user = models.User.objects.get(openid=openid)
return user
def distance_to_location(current_lng, current_lat, radius):
add_lat = radius / settings.CM_DISCOVER_STORE_LAT_TO_DISTANCE
add_lng = radius / settings.CM_DISCOVER_STORE_LNG_TO_DISTANCE
radius_lat_location = add_lat / 3600
radius_lng_location = add_lng / 3600
start_lat = current_lat - radius_lat_location
end_lat = current_lat + radius_lat_location
start_lng = current_lng - radius_lng_location
end_lng = current_lng + radius_lng_location
return [start_lng, end_lng, start_lat, end_lat]
def search(item_list, source):
sql = '''
SELECT
i.*
FROM
recommendorder_item as i
WHERE
i.item_id in ('%(item_ids)s') AND
i.source = '%(source)s'
'''
cur = connection.cursor()
cur.execute(sql, {'item_ids': "','".join(item_list), 'source': source})
    item_detail_list = dictfetchall(cur)
    return item_detail_list
def get_models_by_postion(position):
sql = '''
SELECT
models.*,
unlike.unlikes,
likes.likes
FROM "yalongApp_ylmodel" as models
LEFT JOIN (SELECT count(ulike.id) unlikes ,ulike.collecteder_id
FROM "yalongApp_unlike" AS ulike LEFT JOIN "yalongApp_ylmodel" as model
ON collecteder_id=model.id
GROUP BY collecteder_id) AS unlike
ON unlike.collecteder_id=models.id
LEFT JOIN (SELECT count(li.id) likes ,li.collecteder_id
FROM "yalongApp_like" AS li LEFT JOIN "yalongApp_ylmodel" as model1
ON li.collecteder_id=model1.id
GROUP BY collecteder_id) AS likes
ON likes.collecteder_id=models.id
WHERE
            models.position=%(position)s
'''
cur = connection.cursor()
cur.execute(sql, {'position': position })
    item_detail_list = dictfetchall(cur)
    return item_detail_list
def __category_names_to_ids(names):
cursor = connection.cursor()
cursor.execute("""
SELECT array_agg(id) as categories from standard_category where (%s)
""" % ' or '.join(["name='%s'" % n for n in names]))
result = dictfetchall(cursor)
return result[0]['categories']
def __extract_operator(key):
toks = key.split('__')
if len(toks) == 2:
return toks[0], toks[1]
return key, None
def __convert_to_sql(k, v):
if v['splitter'] == 'or':
sub = []
if k == 'category_id':
v['value'] = __category_names_to_ids(v['value'])
for i in v['value']:
sub.append("%s='%s'" % (k, i))
return '(%s)' % ' or '.join(sub)
elif v['splitter'] == 'between':
return "(%s between '%s' and '%s')" % (k, v['value'][0], v['value'][1])
def __strip_each(l):
return [val.strip() for val in l]
def growth_rate(rate):
if rate:
return '+%.2f' % rate if rate > 0 else '%.2f' % rate
def query_builder(q, m):
toks = q.split('@')
d = {}
for t in toks:
kv = t.strip().split('=')
if len(kv) != 2:
continue
key = kv[0].strip()
        key, operator = __extract_operator(key)
        if key in m:
            if operator and operator == u'在':
values = kv[1].split('~')
if len(values) != 2:
continue
d[m[key]] = {'splitter': 'between',
'value': __strip_each(values)}
else:
d[m[key]] = {'splitter': 'or',
'value': __strip_each(kv[1].split(','))}
out = []
for key, values in d.items():
out.append(__convert_to_sql(key, values))
return ' and '.join(out)
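# Worked example for query_builder above (illustrative mapping and values): with
#   m = {u'price': 'price', u'category': 'category_id'} and
#   q = u'price__在=10~20@category=Food,Drink'
# the result joins "(price between '10' and '20')" with a
# "(category_id='...' or category_id='...')" clause (category names are first
# mapped to ids via __category_names_to_ids) using ' and '.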
def construct_where_clause(filter_dict, params):
def handle_single_filter(key):
if key.endswith('__contains'):
filter_dict[key] = '%' + filter_dict[key] + '%'
col_name = key[0: -len('__contains')]
return '%s LIKE %%(%s)s' % (col_name, key)
if key.endswith('__lt'):
col_name = key[0: -len('__lt')]
return '%s<%%(%s)s' % (col_name, key)
if key.endswith('__gt'):
col_name = key[0: -len('__gt')]
return '%s>%%(%s)s' % (col_name, key)
else:
return '%s = %%(%s)s' % (key, key)
if filter_dict is None or len(filter_dict) == 0:
return ''
clauses = [handle_single_filter(k) for k in filter_dict.keys()]
for k, v in six.iteritems(filter_dict):
params[k] = v
return '\nWHERE ' + "\n AND \n\t".join(clauses)
def get_store_active_v1(store_id, data_type, active_id):
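    # data_type selects which activity logs are aggregated: 'today' keeps rows with
    # log.is_writeoff=FALSE created today, 'all' keeps every row with
    # log.is_writeoff=FALSE, and 'history' keeps rows with log.is_writeoff=TRUE;
    # all three branches additionally require plog.is_writeoff=TRUE.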
if data_type == 'today':
today = timezone.now().date()
sql = """
SELECT
m.store_id,
m.active_id,
u.phone,
to_char(log.time, 'YYYY-MM-DD HH24:MI:SS') AS time,
active.name AS active_name,
saler.name AS saler_name,
plog.price AS price_sum,
saler.is_boss
FROM
shangmi_activestoremap AS m
LEFT JOIN
shangmi_activelog AS log
ON
m.id=log.active_map_id
LEFT JOIN
shangmi_active AS active
ON
m.active_id=active.id
LEFT JOIN
shangmi_saler AS saler
ON
saler.id=log.saler_id
LEFT JOIN
shangmi_customergetpricelog as plog
ON
log.customer_get_price_log_id=plog.id
LEFT JOIN
shangmi_user AS u
ON
plog.customer_id=u.id
WHERE
m.store_id={store_id}
AND
active.status=1
AND
log.is_writeoff=FALSE
AND
log.time::date='{date}'
AND
active.id={active_id}
AND
plog.is_writeoff=TRUE;
""".format(store_id=store_id, date=today, active_id=active_id)
elif data_type == 'all':
sql = """
SELECT
m.store_id,
m.active_id,
u.phone,
to_char(log.time, 'YYYY-MM-DD HH24:MI:SS') AS time,
active.name AS active_name,
saler.name AS saler_name,
plog.price AS price_sum,
saler.is_boss
FROM
shangmi_activestoremap AS m
LEFT JOIN
shangmi_activelog AS log
ON
m.id=log.active_map_id
LEFT JOIN
shangmi_active AS active
ON
m.active_id=active.id
LEFT JOIN
shangmi_saler AS saler
ON
saler.id=log.saler_id
LEFT JOIN
shangmi_customergetpricelog as plog
ON
log.customer_get_price_log_id=plog.id
LEFT JOIN
shangmi_user AS u
ON
plog.customer_id=u.id
WHERE
m.store_id={store_id}
AND
active.status=1
AND
plog.is_writeoff=TRUE
AND
log.is_writeoff=FALSE
AND
active.id={active_id};
""".format(store_id=store_id, active_id=active_id)
elif data_type == 'history':
sql = """
SELECT
m.store_id,
m.active_id,
u.phone,
to_char(log.time, 'YYYY-MM-DD HH24:MI:SS') AS time,
active.name AS active_name,
saler.name AS saler_name,
plog.price AS price_sum,
saler.is_boss
FROM
shangmi_activestoremap AS m
LEFT JOIN
shangmi_activelog AS log
ON
m.id=log.active_map_id
LEFT JOIN
shangmi_active AS active
ON
m.active_id=active.id
LEFT JOIN
shangmi_saler AS saler
ON
saler.id=log.saler_id
LEFT JOIN
shangmi_customergetpricelog as plog
ON
log.customer_get_price_log_id=plog.id
LEFT JOIN
shangmi_user AS u
ON
plog.customer_id=u.id
WHERE
m.store_id={store_id}
AND
active.status=1
AND
log.is_writeoff=TRUE
AND
plog.is_writeoff=TRUE
AND
active.id={active_id}
;
""".format(store_id=store_id, active_id=active_id)
else:
return []
cur = connection.cursor()
cur.execute(sql)
item_detail_list = dictfetchall(cur)
distribute = models.StoreMoneyDistribution.objects.get(
active_id=active_id)
for data in item_detail_list:
if data.get('is_boss') == True:
data['distribute'] = distribute.boss_money
else:
data['distribute'] = distribute.boss_distribute_money
data['phone'] = data['phone'][0:3]+ '****'+ data['phone'][7:]
return item_detail_list
def get_store_active(store_id, data_type, active_id):
if data_type == 'today':
today = timezone.now().date()
sql = """
SELECT
m.store_id,
m.active_id,
count(log.customer_id) AS num,
MAX(active.name) AS active_name,
max(saler.name) AS saler_name,
sum(plog.price) AS price_sum
FROM
shangmi_activestoremap AS m
LEFT JOIN
shangmi_activelog AS log
ON
m.id=log.active_map_id
LEFT JOIN
shangmi_active AS active
ON
m.active_id=active.id
LEFT JOIN
shangmi_saler AS saler
ON
saler.id=log.saler_id
LEFT JOIN
shangmi_customergetpricelog as plog
ON
log.customer_get_price_log_id=plog.id
WHERE
m.store_id={store_id}
AND
active.status=1
AND
log.is_writeoff=FALSE
AND
log.time::date='{date}'
AND
active.id={active_id}
AND
plog.is_writeoff=TRUE
GROUP BY
m.store_id, m.active_id, log.saler_id;
""".format(store_id=store_id, date=today, active_id=active_id)
elif data_type == 'all':
sql = """
SELECT
m.store_id,
m.active_id,
count(log.customer_id) AS num,
MAX(active.name) AS active_name,
max(saler.name) AS saler_name,
sum(plog.price) AS price_sum
FROM
shangmi_activestoremap AS m
LEFT JOIN
shangmi_activelog AS log
ON
m.id=log.active_map_id
LEFT JOIN
shangmi_active AS active
ON
m.active_id=active.id
LEFT JOIN
shangmi_saler AS saler
ON
saler.id=log.saler_id
LEFT JOIN
shangmi_customergetpricelog as plog
ON
log.customer_get_price_log_id=plog.id
WHERE
m.store_id={store_id}
AND
active.status=1
AND
plog.is_writeoff=TRUE
AND
log.is_writeoff=FALSE
AND
active.id={active_id}
GROUP BY
m.store_id, m.active_id, log.saler_id;
""".format(store_id=store_id, active_id=active_id)
elif | |
import sklearn.preprocessing
import utils
import collections
import codecs
import utils_nlp
import re
import time
import token
import os
import pickle
import random
class Dataset(object):
"""A class for handling data sets."""
def __init__(self, name='', verbose=False, debug=False):
self.name = name
self.verbose = verbose
self.debug = debug
def _parse_gaz(self,gaz_filepath):
with open(gaz_filepath) as f:
s = f.read()
lines = s.splitlines()
gaz_set = set([])
for l in lines:
gaz_set.add(l)
self.gaz_set = gaz_set
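    # The gazetteer file is read as one entry per line into gaz_set; _parse_dataset
    # later derives a binary gaz feature per token (1 if token.lower() is in the set).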
def _parse_aff(self, aff_filepath):
with open(aff_filepath) as f:
s = f.read()
lines = s.splitlines()
aff_set = dict(suffix=[], prefix=[], root=[])
for l in lines:
tmp = l.split('\n')
if tmp[0].strip() == 'suffix':
aff_set['suffix'].append(tmp[2].strip())
elif tmp[0].strip() == 'prefix':
aff_set['prefix'].append(tmp[2].strip())
elif tmp[0].strip() == 'root':
aff_set['root'].append(tmp[2].strip())
self.aff_set = aff_set
def _parse_dataset(self, dataset_filepath, parameters):
token_count = collections.defaultdict(lambda: 0)
label_count = collections.defaultdict(lambda: 0)
character_count = collections.defaultdict(lambda: 0)
if parameters['use_pos']:
pos_tag_count = collections.defaultdict(lambda: 0)
if parameters['use_gaz']:
gaz_count = collections.defaultdict(lambda: 0)
#self._parse_gaz(parameters['gaz_filepath'])
if parameters['use_aff']:
aff_count = collections.defaultdict(lambda: 0)
line_count = -1
tokens = []
labels = []
pos_tags = []
new_token_sequence = []
new_label_sequence = []
if parameters['use_pos']:
new_pos_tag_sequence = []
if parameters['use_gaz']:
new_gaz_sequence = []
gazs = []
if parameters['use_aff']:
new_aff_sequence = []
affs = []
if dataset_filepath:
f = codecs.open(dataset_filepath, 'r', 'UTF-8')
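            # Expected input is CoNLL-style: one whitespace-separated line per token,
            # with the token in the first field and its label in the last field, and
            # blank lines or -DOCSTART- markers separating sequences.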
for line in f:
line_count += 1
line = line.strip().split(' ')
if len(line) == 0 or len(line[0]) == 0 or '-DOCSTART-' in line[0]:
if len(new_token_sequence) > 0:
labels.append(new_label_sequence)
tokens.append(new_token_sequence)
if parameters['use_pos']:
pos_tags.append(new_pos_tag_sequence)
if parameters['use_gaz']:
gazs.append(new_gaz_sequence)
if parameters['use_aff']:
affs.append(new_aff_sequence)
new_aff_sequence = []
new_token_sequence = []
new_label_sequence = []
new_pos_tag_sequence = []
new_gaz_sequence = []
continue
token = str(line[0])
label = str(line[-1])
# beware: in both cases we are assuming bioes
if parameters['use_pos']:
'''
if parameters['tokenizer'] == 'pos':
pos_tag = str(line[-2])
else:
pos_tag = str(line[-3])
'''
if parameters['tokenizer'] == 'pos':
pos_tag = str(line[-3])
else:
pos_tag = str(line[-4])
#print(pos_tag)
if parameters['use_gaz']:
gaz = token.lower() in self.gaz_set
if gaz:
gaz = 1
else:
gaz = 0
if parameters['use_aff']:
aff = 0
# Check for prefix
for pref in self.aff_set['prefix']:
pattern = '^' + re.escape(pref.lower())
result = re.match(pattern, token.lower())
if result:
aff = 1
for suf in self.aff_set['suffix']:
pattern = re.escape(suf.lower()) + '$'
result = re.match(pattern, token.lower())
if result:
aff = 1
for rot in self.aff_set['root']:
result = token.lower().find(rot)
if result > 1:
aff = 1
token_count[token] += 1
label_count[label] += 1
if parameters['use_pos']:
pos_tag_count[pos_tag] += 1
if parameters['use_gaz']:
gaz_count[gaz] += 1
if parameters['use_aff']:
aff_count[aff] += 1
new_token_sequence.append(token)
new_label_sequence.append(label)
if parameters['use_pos']:
new_pos_tag_sequence.append(pos_tag)
if parameters['use_gaz']:
new_gaz_sequence.append(gaz)
if parameters['use_aff']:
new_aff_sequence.append(aff)
for character in token:
character_count[character] += 1
if self.debug and line_count > 200: break# for debugging purposes
if len(new_token_sequence) > 0:
labels.append(new_label_sequence)
tokens.append(new_token_sequence)
if parameters['use_pos']:
pos_tags.append(new_pos_tag_sequence)
if parameters['use_gaz']:
gazs.append(new_gaz_sequence)
if parameters['use_aff']:
affs.append(new_aff_sequence)
f.close()
if not parameters['use_pos']:
pos_tags = None
pos_tag_count = None
if not parameters['use_gaz']:
gazs = None
gaz_count = None
if not parameters['use_aff']:
affs = None
aff_count = None
return labels, tokens, token_count, label_count, character_count, pos_tags, pos_tag_count, gazs, gaz_count, affs, aff_count
def _convert_to_indices(self, dataset_types, parameters):
tokens = self.tokens
labels = self.labels
if parameters['use_pos']:
pos_tags = self.pos_tags
if parameters['use_gaz']:
gazs = self.gazs
if parameters['use_aff']:
affs = self.affs
token_to_index = self.token_to_index
character_to_index = self.character_to_index
label_to_index = self.label_to_index
index_to_label = self.index_to_label
if parameters['use_pos']:
index_to_pos_tag = self.index_to_pos_tag
pos_tag_to_index = self.pos_tag_to_index
if parameters['use_gaz']:
gaz_to_index = self.gaz_to_index
if parameters['use_aff']:
aff_to_index = self.aff_to_index
# Map tokens and labels to their indices
token_indices = {}
label_indices = {}
characters = {}
if parameters['use_pos']:
pos_tag_indices = {}
if parameters['use_gaz']:
gaz_indices = {}
if parameters['use_aff']:
aff_indices = {}
token_lengths = {}
character_indices = {}
character_indices_padded = {}
for dataset_type in dataset_types:
token_indices[dataset_type] = []
characters[dataset_type] = []
#if parameters['use_pos']:
# pos_tags[dataset_type] = []
character_indices[dataset_type] = []
token_lengths[dataset_type] = []
character_indices_padded[dataset_type] = []
for token_sequence in tokens[dataset_type]:
token_indices[dataset_type].append([token_to_index.get(token, self.UNK_TOKEN_INDEX) for token in token_sequence])
characters[dataset_type].append([list(token) for token in token_sequence])
character_indices[dataset_type].append([[character_to_index.get(character, random.randint(1, max(self.index_to_character.keys()))) for character in token] for token in token_sequence])
token_lengths[dataset_type].append([len(token) for token in token_sequence])
longest_token_length_in_sequence = max(token_lengths[dataset_type][-1])
character_indices_padded[dataset_type].append([utils.pad_list(temp_token_indices, longest_token_length_in_sequence, self.PADDING_CHARACTER_INDEX) for temp_token_indices in character_indices[dataset_type][-1]])
label_indices[dataset_type] = []
for label_sequence in labels[dataset_type]:
label_indices[dataset_type].append([label_to_index[label] for label in label_sequence])
if parameters['use_pos']:
pos_tag_indices[dataset_type] = []
for pos_tag_sequence in pos_tags[dataset_type]:
pos_tag_indices[dataset_type].append([pos_tag_to_index[pos_tag] for pos_tag in pos_tag_sequence])
if parameters['use_gaz']:
gaz_indices[dataset_type] = []
for gaz_sequence in gazs[dataset_type]:
gaz_indices[dataset_type].append([gaz_to_index[gaz] for gaz in gaz_sequence])
if parameters['use_aff']:
aff_indices[dataset_type] = []
for aff_sequence in affs[dataset_type]:
aff_indices[dataset_type].append([aff_to_index[aff] for aff in aff_sequence])
if self.verbose:
print('token_lengths[\'train\'][0][0:10]: {0}'.format(token_lengths['train'][0][0:10]))
if self.verbose:
print('characters[\'train\'][0][0:10]: {0}'.format(characters['train'][0][0:10]))
if self.verbose:
print('token_indices[\'train\'][0:10]: {0}'.format(token_indices['train'][0:10]))
if self.verbose:
print('label_indices[\'train\'][0:10]: {0}'.format(label_indices['train'][0:10]))
if self.verbose:
print('character_indices[\'train\'][0][0:10]: {0}'.format(character_indices['train'][0][0:10]))
if self.verbose:
print('character_indices_padded[\'train\'][0][0:10]: {0}'.format(character_indices_padded['train'][0][0:10])) # Vectorize the labels
if parameters['use_pos']:
if self.verbose:
print('pos_tag_indices[\'train\'][0:10]: {0}'.format(pos_tag_indices['train'][0:10]))
if parameters['use_gaz']:
if self.verbose:
print('gaz_indices[\'train\'][0:10]: {0}'.format(gaz_indices['train'][0:10]))
if parameters['use_aff']:
if self.verbose:
print('aff_indices[\'train\'][0:10]: {0}'.format(aff_indices['train'][0:10]))
# [Numpy 1-hot array](http://stackoverflow.com/a/42263603/395857)
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(range(max(index_to_label.keys()) + 1))
label_vector_indices = {}
for dataset_type in dataset_types:
label_vector_indices[dataset_type] = []
for label_indices_sequence in label_indices[dataset_type]:
label_vector_indices[dataset_type].append(label_binarizer.transform(label_indices_sequence))
if parameters['use_pos']:
pos_tag_binarizer = sklearn.preprocessing.LabelBinarizer()
pos_tag_binarizer.fit(range(max(index_to_pos_tag.keys()) + 1))
pos_tag_vector_indices = {}
for dataset_type in dataset_types:
pos_tag_vector_indices[dataset_type] = []
for pos_tag_indices_sequence in pos_tag_indices[dataset_type]:
pos_tag_vector_indices[dataset_type].append(pos_tag_binarizer.transform(pos_tag_indices_sequence))
if parameters['use_gaz']:
gaz_vector_indices = {}
for dataset_type in dataset_types:
gaz_vector_indices[dataset_type] = []
for gaz_indices_sequence in gaz_indices[dataset_type]:
gaz_vector_index = []
for element in gaz_indices_sequence:
gaz_vector_index.append([element])
gaz_vector_indices[dataset_type].append(gaz_vector_index)
if parameters['use_aff']:
aff_vector_indices = {}
for dataset_type in dataset_types:
aff_vector_indices[dataset_type] = []
for aff_indices_sequence in aff_indices[dataset_type]:
aff_vector_index = []
for element in aff_indices_sequence:
aff_vector_index.append([element])
aff_vector_indices[dataset_type].append(aff_vector_index)
if self.verbose:
print('label_vector_indices[\'train\'][0:2]: {0}'.format(label_vector_indices['train'][0:2]))
if self.verbose:
print('len(label_vector_indices[\'train\']): {0}'.format(len(label_vector_indices['train'])))
if parameters['use_pos']:
if self.verbose:
print('pos_tag_vector_indices[\'train\'][0:2]: {0}'.format(pos_tag_vector_indices['train'][0:2]))
if self.verbose:
print('len(pos_tag_vector_indices[\'train\']): {0}'.format(len(pos_tag_vector_indices['train'])))
if parameters['use_gaz']:
if self.verbose:
print('gaz_vector_indices[\'train\'][0:2]: {0}'.format(gaz_vector_indices['train'][0:2]))
if self.verbose:
print('len(gaz_vector_indices[\'train\']): {0}'.format(len(gaz_vector_indices['train'])))
if parameters['use_aff']:
if self.verbose:
print('aff_vector_indices[\'train\'][0:2]: {0}'.format(aff_vector_indices['train'][0:2]))
if self.verbose:
print('len(aff_vector_indices[\'train\']): {0}'.format(len(aff_vector_indices['train'])))
if not parameters['use_pos']:
pos_tag_indices = None
pos_tag_vector_indices = None
if not parameters['use_gaz']:
gaz_indices = None
gaz_vector_indices = None
if not parameters['use_aff']:
aff_indices = None
aff_vector_indices = None
return token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices, pos_tag_indices, pos_tag_vector_indices, gaz_indices, gaz_vector_indices, aff_indices, aff_vector_indices
    def update_dataset(self, dataset_filepaths, dataset_types, parameters):
'''
dataset_filepaths : dictionary with keys 'train', 'valid', 'test', 'deploy'
Overwrites the data of type specified in dataset_types using the existing token_to_index, character_to_index, and label_to_index mappings.
'''
for dataset_type in dataset_types:
            self.labels[dataset_type], self.tokens[dataset_type], _, _, _, self.pos_tags[dataset_type], _, _, _, _, _ = self._parse_dataset(dataset_filepaths.get(dataset_type, None), parameters)
token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices, pos_tag_indices, pos_tag_vector_indices, gaz_indices, gaz_vector_indices, aff_indices, aff_vector_indices = self._convert_to_indices(dataset_types,parameters)
self.token_indices.update(token_indices)
self.label_indices.update(label_indices)
self.character_indices_padded.update(character_indices_padded)
self.character_indices.update(character_indices)
self.token_lengths.update(token_lengths)
self.characters.update(characters)
self.label_vector_indices.update(label_vector_indices)
if parameters['use_pos']:
self.pos_tag_indices.update(pos_tag_indices)
self.pos_tag_vector_indices.update(pos_tag_vector_indices)
if parameters['use_gaz']:
self.gaz_indices.update(gaz_indices)
self.gaz_vector_indices.update(gaz_vector_indices)
if parameters['use_aff']:
self.aff_indices.update(aff_indices)
self.aff_vector_indices.update(aff_vector_indices)
def load_dataset(self, dataset_filepaths, parameters, token_to_vector=None):
'''
dataset_filepaths : dictionary with keys 'train', 'valid', 'test', 'deploy'
'''
start_time = time.time()
print('Load dataset... ', end='', flush=True)
if parameters['token_pretrained_embedding_filepath'] != '':
            if token_to_vector is None:
token_to_vector = utils_nlp.load_pretrained_token_embeddings(parameters)
else:
token_to_vector = {}
if self.verbose: print("len(token_to_vector): {0}".format(len(token_to_vector)))
if parameters['use_gaz']:
self._parse_gaz(parameters['gaz_filepath'])
if parameters['use_aff']:
self._parse_aff(parameters['aff_filepath'])
# Load pretraining dataset to ensure that index to label is compatible to the pretrained model,
# and that token embeddings that are learned in the pretrained model are loaded properly.
all_tokens_in_pretraining_dataset = []
all_characters_in_pretraining_dataset = []
if parameters['use_pretrained_model']:
pretraining_dataset = pickle.load(open(os.path.join(parameters['pretrained_model_folder'], 'dataset.pickle'), 'rb'))
all_tokens_in_pretraining_dataset = pretraining_dataset.index_to_token.values()
all_characters_in_pretraining_dataset = pretraining_dataset.index_to_character.values()
remap_to_unk_count_threshold = 1
self.UNK_TOKEN_INDEX = 0
self.PADDING_CHARACTER_INDEX = 0
self.tokens_mapped_to_unk = []
self.UNK = 'UNK'
self.unique_labels = []
labels = {}
tokens = {}
pos_tags = {}
gazs = {}
affs = {}
label_count = {}
token_count = {}
character_count = {}
pos_tag_count = {}
gaz_count = {}
aff_count = {}
for dataset_type in ['train', 'valid', 'test', 'deploy']:
labels[dataset_type], tokens[dataset_type], token_count[dataset_type], label_count[dataset_type], character_count[dataset_type], \
pos_tags[dataset_type], pos_tag_count[dataset_type],gazs[dataset_type],gaz_count[dataset_type], affs[dataset_type],aff_count[dataset_type] = self._parse_dataset(dataset_filepaths.get(dataset_type, None),parameters)
if self.verbose: print("dataset_type: {0}".format(dataset_type))
if self.verbose: print("len(token_count[dataset_type]): {0}".format(len(token_count[dataset_type])))
token_count['all'] = {}
for token in list(token_count['train'].keys()) + list(token_count['valid'].keys()) + list(token_count['test'].keys()) + list(token_count['deploy'].keys()):
token_count['all'][token] = token_count['train'][token] + token_count['valid'][token] + token_count['test'][token] + token_count['deploy'][token]
if parameters['load_all_pretrained_token_embeddings']:
for token in token_to_vector:
if token not in token_count['all']:
token_count['all'][token] = -1
token_count['train'][token] = -1
for token in all_tokens_in_pretraining_dataset:
if token not in token_count['all']:
token_count['all'][token] = -1
token_count['train'][token] = -1
character_count['all'] = {}
for character in list(character_count['train'].keys()) + list(character_count['valid'].keys()) + list(character_count['test'].keys()) + list(character_count['deploy'].keys()):
character_count['all'][character] = character_count['train'][character] + character_count['valid'][character] + character_count['test'][character] + character_count['deploy'][character]
for character in all_characters_in_pretraining_dataset:
if character not in character_count['all']:
character_count['all'][character] = -1
character_count['train'][character] = -1
for dataset_type in dataset_filepaths.keys():
if self.verbose: print("dataset_type: {0}".format(dataset_type))
if self.verbose: print("len(token_count[dataset_type]): {0}".format(len(token_count[dataset_type])))
label_count['all'] = {}
        for label in list(label_count['train'].keys()) + list(label_count['valid'].keys()) + list(label_count['test'].keys()) + list(label_count['deploy'].keys()):
            label_count['all'][label] = label_count['train'][label] + label_count['valid'][label] + label_count['test'][label] + label_count['deploy'][label]
if parameters['use_pos']:
| |
1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
'countersignedAgreementPath': 'countersigned/file.jpg'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'},
'slug': 'g-cloud-11',
}
)
def test_cannot_update_countersigned_agreement_path_if_agreement_has_not_been_approved(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': 'Can not update countersignedAgreementPath if agreement has not been approved for countersigning'
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'},
'slug': 'g-cloud-12',
}
)
def test_can_update_countersigned_agreement_path_without_approval_for_esignature_framework(
self, supplier_framework
):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementPath': 'countersigned/file.jpg'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
def test_can_unset_countersigned_agreement_path(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1),
countersigned_agreement_path='countersigned/that/bad/boy.pdf'
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': None
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
class TestSignFrameworkAgreementThatHasFrameworkAgreementVersion(BaseFrameworkAgreementTest):
def sign_agreement(self, agreement_id, agreement):
return self.client.post(
'/agreements/{}/sign'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>',
'agreement': agreement
}),
content_type='application/json')
def test_can_sign_framework_agreement(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': 1}})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementPath': '/example.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': user_role_supplier,
'frameworkAgreementVersion': 'v1.0'
},
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_signing_framework_agreement_produces_audit_event(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 200
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "sign_agreement"
assert audit.user == "<EMAIL>"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'update': {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}}
}
def test_can_re_sign_framework_agreement(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': 2,
'frameworkAgreementVersion': 'v1.0'
},
signed_agreement_path='/example.pdf',
signed_agreement_returned_at=datetime.utcnow()
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementPath': '/example.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': 1,
'frameworkAgreementVersion': 'v1.0'
},
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_can_not_sign_framework_agreement_that_has_no_signer_name(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == {'signerName': 'answer_required'})
def test_can_not_sign_framework_agreement_that_has_no_signer_role(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == {'signerRole': 'answer_required'})
def test_400_if_user_signing_framework_agreement_does_not_exist(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
# The user_role_supplier fixture sets up user with ID 1; there is no user with ID 20
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': 20}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == "No user found with id '20'")
# Frameworks prior to G-Cloud 8 do not have framework_agreement_version set, and signing these stores only the timestamp
class TestSignFrameworkAgreementThatHasNoFrameworkAgreementVersion(BaseFrameworkAgreementTest):
def sign_agreement(self, agreement_id):
return self.client.post(
'/agreements/{}/sign'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>'
}),
content_type='application/json')
def test_can_sign_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_signing_framework_agreement_produces_audit_event(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "sign_agreement"
assert audit.user == "<EMAIL>"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
}
def test_can_re_sign_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime.utcnow()
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
class TestPutFrameworkAgreementOnHold(BaseFrameworkAgreementTest):
def put_framework_agreement_on_hold(self, agreement_id):
return self.client.post(
'/agreements/{}/on-hold'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>'
}),
content_type='application/json')
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_put_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-12-12'):
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'on-hold',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'signedAgreementPutOnHoldAt': '2016-12-12T00:00:00.000000Z'
}
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "update_agreement"
assert audit.user == "<EMAIL>"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'on-hold'
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_put_unsigned_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' to be put on hold"
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_put_countersigned_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 9, 1),
countersigned_agreement_returned_at=datetime(2016, 10, 1)
)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' to be put on hold"
def test_can_not_put_framework_agreement_on_hold_that_has_no_framework_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have a 'frameworkAgreementVersion' to be put on hold"
class TestApproveFrameworkAgreement(BaseFrameworkAgreementTest):
def approve_framework_agreement(self, agreement_id):
return self.client.post(
'/agreements/{}/approve'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>',
'agreement': {'userId': '1234'}
}),
content_type='application/json')
def unapprove_framework_agreement(self, agreement_id):
return self.client.post(
'/agreements/{}/approve'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>',
'agreement': {'userId': '1234', 'unapprove': True}
}),
content_type='application/json')
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_can_approve_signed_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-12-12'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z',
'countersignedAgreementDetails': {
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings',
'approvedByUserId': '1234'
}
}
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "countersign_agreement"
assert audit.user == "<EMAIL>"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved'
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_can_approve_on_hold_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-10-02'):
on_hold_res = self.client.post(
'/agreements/{}/on-hold'.format(agreement_id),
data=json.dumps(
{
'updated_by': '<EMAIL>'
}),
content_type='application/json')
assert on_hold_res.status_code == 200
on_hold_data = json.loads(on_hold_res.get_data(as_text=True))['agreement']
assert on_hold_data['status'] == 'on-hold'
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert 'signedAgreementPutOnHoldAt' not in data['agreement']
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {
                'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings',
'approvedByUserId': '1234'
}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_approve_unsigned_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' or 'on hold' to be countersigned"
def test_can_approve_framework_agreement_that_has_no_framework_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {'approvedByUserId': '1234'}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_approve_framework_agreement_with_agreement_version_but_no_name_or_role(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {'approvedByUserId': '1234'}
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_serialized_supplier_framework_contains_updater_details_after_approval(self, supplier_framework):
user = User(
id=1234,
name='Chris',
email_address='<EMAIL>',
| |
== 3:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
ew[0] = [dList[4][0] + embrasure, dList[4][1] - window, dList[4][2] - window]
ew[1] = [dList[1][0] + embrasure, dList[1][1] - window, dList[1][2] + window]
ew[2] = [dList[2][0] + embrasure, dList[2][1] + window, dList[2][2] + window]
ew[3] = [dList[5][0] + embrasure, dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1] + embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1] + embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1] + embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1] + embrasure, dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1] - embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1] - embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1] - embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1] - embrasure, dList[5][2] - window]
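        # Added reading of the geometry above (an interpretation, not an original comment): dw appears to
        # hold the four corners of the window opening, inset from the dormer face outline by `window`,
        # while ew holds the same corners offset along the face normal by `embrasure`; the dw/ew pairs
        # built below then form the reveal faces and ew0 the recessed window pane.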
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
binosemantics(lod3geometry, d4, dwring)
dw0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(dw[0])
dw1 = GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(dw[1])
dw2 = GMLPointList(dw[3]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3])
dw3 = GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(dw[3])
ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
for bipoly in [dw0, dw1, dw2, dw3]:
binosemantics(lod3geometry, bipoly)
binosemantics(lod3geometry, ew0)
else:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
ew[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
ew[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
ew[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
ew[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
elif side == 3:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
ew[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
ew[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
ew[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
ew[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
#ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
binosemantics(lod3geometry, d4, dwring)
#binosemantics(lod3geometry, ew0)
elif kind == 'chimney':
lod3geometry = etree.SubElement(bi, "{%s}lod3Geometry" % ns_bldg)
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[7] + ' ' + dListGML[0]
d2 = dListGML[3] + ' ' + dListGML[6] + ' ' + dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3]
d3 = dListGML[0] + ' ' + dListGML[7] + ' ' + dListGML[6] + ' ' + dListGML[3] + ' ' + dListGML[0]
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
binosemantics(lod3geometry, d1)
binosemantics(lod3geometry, d2)
binosemantics(lod3geometry, d3)
binosemantics(lod3geometry, d4)
d5 = dListGML[7] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[6] + ' ' + dListGML[7]
binosemantics(lod3geometry, d5)
#-- Closure surface in the roof
# d0 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[0]
# binosemantics(lod3geometry, d0)
#-- Closure surface in the roof
d0 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[0]
bisemantics(lod3geometry, d0, "ClosureSurface")
if semantics == 1:
if kind == 'dormer':
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[0]
d2 = dListGML[0] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[3] + ' ' + dListGML[0]
d3 = dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[5]
bisemantics(bi, d1, "WallSurface")
bisemantics(bi, d2, "RoofSurface")
bisemantics(bi, d3, "WallSurface")
if window is None:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
bisemantics(bi, d4, "WallSurface")
if window is not None:
#-- Face with the window
if embrasure is not None and embrasure > 0.0:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
ew[0] = [dList[4][0] - embrasure, dList[4][1] + window, dList[4][2] | |
# chemics/biomass_composition.py
"""
Use the `biocomp()` function to calculate biomass composition. Use the
`plot_biocomp()` function to create a Matplotlib figure of the biomass
composition results.
"""
import numpy as np
def biocomp(yc, yh, yo=None, yh2o=0, yash=0, alpha=0.6, beta=0.8, gamma=0.8,
delta=1, epsilon=1, printcomp=False):
"""
    Determine biomass composition from ultimate analysis mass fractions of C,
H, and O. Composition returned as cellulose, hemicellulose, lignins, and
extractives based on method discussed in the Debiagi 2015 paper [1]_.
Parameters
----------
yc : float
Mass fraction of carbon in biomass, dry ash free basis [-]
yh : float
Mass fraction of hydrogen in biomass, dry ash free basis [-]
yo : float, optional
Mass fraction of oxygen in biomass, if not given then value is
calculated as difference, dry ash free basis [-]. Default is None.
yh2o : float, optional
Mass fraction of water in biomass, as received basis [-]. Default is 0.
yash : float, optional
Mass fraction of ash in biomass, as received basis [-]. Default is 0.
alpha : float, optional
Splitting parameter as molar ratio of cellulose and hemicellulose
contained in reference mixture RM1 [-]. Default is 0.6.
beta : float, optional
Splitting parameter as molar ratio of lignin LIG-O and lignin LIG-C
contained in reference mixture RM2 [-]. Default is 0.8.
gamma : float, optional
Splitting parameter as molar ratio of lignin LIG-H and lignin LIG-C
contained in reference mixture RM3 [-]. Default is 0.8.
delta : float, optional
Splitting parameter as molar ratio of lignins (LIG-H and LIG-C) and
extractive TGL to define reference mixture RM2 [-]. Default is 1.0.
epsilon : float, optional
Splitting parameter as molar ratio of lignins (LIG-O and LIG-C) and
extractive TANN to define reference mixture RM3 [-]. Default is 1.0.
printcomp : bool, optional
Print composition results if True. Default is False.
Returns
-------
comp : dict
Dictionary representing reference mixtures and biomass compositions on
the basis of mole fractions (x) and mass fractions (y).
- `y_rm1` mass fractions [C, H, O] of reference mixture RM1
- `y_rm2` mass fractions [C, H, O] of reference mixture RM2
- `y_rm3` mass fractions [C, H, O] of reference mixture RM3
- `x_daf` mole fractions [cell, hemi, ligc, ligh, ligo, tann, tgl] of
biomass as dry ash-free basis
- `x_wet` mole fractions [cell, hemi, ligc, ligh, ligo, tann, tgl] of
biomass as wet basis
- `y_daf` mass fractions [cell, hemi, ligc, ligh, ligo, tann, tgl] of
biomass as dry ash-free basis
- `y_wet` mass fractions [cell, hemi, ligc, ligh, ligo, tann, tgl] of
biomass as wet basis
- `y_wetash` mass fractions [cell, hemi, ligc, ligh, ligo, tann, tgl]
of biomass as wet ash basis
Raises
------
ValueError
When sum of mass fractions is not equal to one.
Examples
--------
>>> yc = 0.534
>>> yh = 0.06
>>> bc = biocomp(yc, yh)
>>> bc['y_daf']
array([0.2936, 0.1594, 0.0712, 0.2934, 0.1822, 0, 0])
>>> yc = 0.500
>>> yh = 0.060
>>> yo = 0.440
>>> yash = 0.15
>>> biocomp(yc, yh, yo, yash=yash, printcomp=True)
basis cell hemi ligc ligh ligo tann tgl
x_daf 0.5016 0.3344 0.0328 0.0614 0.0698 0.0000 0.0000
x_wet 0.5016 0.3344 0.0328 0.0614 0.0698 0.0000 0.0000
y_daf 0.4275 0.2322 0.0445 0.1409 0.1549 0.0000 0.0000
y_wet 0.4275 0.2322 0.0445 0.1409 0.1549 0.0000 0.0000
y_wetash 0.3634 0.1974 0.0378 0.1197 0.1317 0.0000 0.0000
References
----------
.. [1] <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>.
Extractives Extend the Applicability of Multistep Kinetic Scheme of Biomass
Pyrolysis. Energy and Fuels, vol. 29, no. 10, pp. 6544-6555, 2015.
"""
# Determine oxygen mass fraction by difference if not explicitly given
if yo is None:
yo = 1 - yc - yh
# Check that mass fractions sum to one
sumy = yc + yh + yo
tol = 1e-4
if abs(sumy - 1.0) > tol:
raise ValueError('Sum of mass fractions must equal one.')
# Cellulose, hemicellulose, and lignins as arrays of [C, H, O]
# See Figure 1 in reference for these formulas
cell = np.array([6, 10, 5])
hemi = np.array([5, 8, 4])
ligc = np.array([15, 14, 4])
ligh = np.array([22, 28, 9])
ligo = np.array([20, 22, 10])
tann = np.array([15, 12, 7])
tgl = np.array([57, 100, 7])
# Molecular weight of cellulose, hemicellulose, lignins, and water
mw_cell = 162.141
mw_hemi = 132.115
mw_ligc = 258.273
mw_ligh = 436.457
mw_ligo = 422.386
mw_tann = 304.254
mw_tgl = 897.42
mw_h2o = 18.015
# Solve for pseudo species
# -----------------------------------------------------------------------
# Reference mixture where rm = [C, H, O]
rm1 = alpha * cell + (1 - alpha) * hemi
rm2 = beta * delta * ligh + (1 - beta) * delta * ligc + (1 - beta * delta - (1 - beta) * delta) * tgl
rm3 = gamma * epsilon * ligo + (1 - gamma) * epsilon * ligc + (1 - gamma * epsilon - (1 - gamma) * epsilon) * tann
# Molecular weight of reference mixture where C, H, O given as 12, 1, 16
mw_rm1 = sum(rm1 * [12, 1, 16])
mw_rm2 = sum(rm2 * [12, 1, 16])
mw_rm3 = sum(rm3 * [12, 1, 16])
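    # Worked example (added for illustration, using the default alpha = 0.6): RM1 mixes cellulose
    # C6H10O5 and hemicellulose C5H8O4 on a molar basis, so
    #   rm1 = 0.6*[6, 10, 5] + 0.4*[5, 8, 4] = [5.6, 9.2, 4.6]
    #   mw_rm1 = 5.6*12 + 9.2*1 + 4.6*16 = 150.0 g/mol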
# Mass fraction of reference mixture where y_rm = [yC, yH, yO]
y_rm1 = (rm1 * [12, 1, 16]) / mw_rm1
y_rm2 = (rm2 * [12, 1, 16]) / mw_rm2
y_rm3 = (rm3 * [12, 1, 16]) / mw_rm3
# Mass fraction of pseudo species where ys = [S1, S2, S3]
# Solve system of linear equations Ax = b
# A is 3x3 matrix with columns = [yC, yH, yO]
a = np.array([y_rm1, y_rm2, y_rm3]).T
b = np.array([yc, yh, yo])
ys = np.linalg.solve(a, b)
# Sum of mass fractions and molecular weights as sum(ys/MW)
sum_ymw = sum(ys / [mw_rm1, mw_rm2, mw_rm3])
# Mole fraction of pseudo species where xs = [S1, S2, S3]
xs = (ys / [mw_rm1, mw_rm2, mw_rm3]) / sum_ymw
# Biomass composition
# -----------------------------------------------------------------------
# Mole fraction as dry ash free basis for biomass components where
# x_daf = [cell, hemi, ligc, ligh, ligo, tann, tgl]
x_dafcell = alpha * xs[0]
x_dafhemi = (1 - alpha) * xs[0]
x_dafligc = (1 - beta) * delta * xs[1] + (1 - gamma) * epsilon * xs[2]
x_dafligh = beta * delta * xs[1]
x_dafligo = gamma * epsilon * xs[2]
x_daftann = (1 - gamma * epsilon - (1 - gamma) * epsilon) * xs[2]
x_daftgl = (1 - beta * delta - (1 - beta) * delta) * xs[1]
x_daf = np.array([x_dafcell, x_dafhemi, x_dafligc, x_dafligh, x_dafligo, x_daftann, x_daftgl])
# Molecular weight vector for biomass components where
# mw_comp = [mw_cell, mw_hemi, mw_ligc, mw_ligh, mw_ligo, mw_tann, mw_tgl]
mw_comp = x_daf * [mw_cell, mw_hemi, mw_ligc, mw_ligh, mw_ligo, mw_tann, mw_tgl]
# Average molecular weight of all biomass components
mw_avg = sum(mw_comp)
# Mass fraction as dry ash free basis for biomass components where
# y_daf = [cell, hemi, ligc, ligh, ligo, tann, tgl]
y_daf = mw_comp / mw_avg
# Mass fraction as wet basis for biomass components where
# y_wet = [cell, hemi, ligc, ligh, ligo, tann, tgl]
y_wet = y_daf * (1 - yh2o)
# Mass fraction as wet + ash basis for biomass components where
# y_wetash = [cell, hemi, ligc, ligh, ligo, tann, tgl]
y_wetash = y_daf * (1 - yh2o - yash)
# Mole fraction as wet basis for biomass components where
# x_wet = [cell, hemi, ligc, ligh, ligo, tann, tgl]
ywet_yh2o = np.concatenate((y_wet, [yh2o]))
sum_xmw = sum(ywet_yh2o / [mw_cell, mw_hemi, mw_ligc, mw_ligh, mw_ligo, mw_tann, mw_tgl, mw_h2o])
x_wet = (y_wet / [mw_cell, mw_hemi, mw_ligc, mw_ligh, mw_ligo, mw_tann, mw_tgl]) / sum_xmw
comp = {
'y_rm1': y_rm1,
'y_rm2': y_rm2,
'y_rm3': y_rm3,
'x_daf': x_daf,
'x_wet': x_wet,
'y_daf': y_daf,
'y_wet': y_wet,
'y_wetash': y_wetash
}
# Print biomass composition results
if printcomp:
print('basis\t cell\t hemi\t ligc\t ligh\t ligo\t tann\t tgl')
print('x_daf\t', ' '.join(f'{x:.4f}' for x | |
'''
author: bg
goal:
type: modelz - sklearn for workflow management + PyTorch/keras for transfer learning components + pytorch for nn modules
how: wrapper class for workflow management (pytorch opt&loss + sklearn pipe&metrics) + ArchitectureMixin and implementation for custom architectures.
ref:
refactors:
'''
from tqdm import tqdm
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin, RegressorMixin, ClusterMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, StratifiedKFold
## TODO: use same @ transforms
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.utils.multiclass import unique_labels
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import nnarchs
from zdata import ZSerializableMixin
from report import ZReporter
import cv2 ## K-Means Clustering
class ZModel(ZSerializableMixin, BaseEstimator): ## TODO: VS internal class for nn.Module subclassing <<< subClassing Vs has-a
## cuda, optimizer, loss, evaluate
MSG_LOSS_PER_EPOCH = "[{:20s}] Train Epoch {:5d}: Loss: {:15.4f} \tn = {:5d}".format
MSG_ACC_ON_PREDICT = "[{:20s}] Predict on n ={:5d}: Acc: {:15.4f}".format
MSG_YHAT_Y_VALZ = "{:3d}. {:4.2f} ===> {:4.2f}".format
def __init__(self, parent_caller_name, nnModel=None, ##TODO: non-nn Models and traditional ML
use_cuda=False, ##TODO: pass to ZNNArch
epochs=3,
loss_func=(nn.CrossEntropyLoss, {}) ,
optimizer=(optim.SGD, {'lr':0.001, 'momentum':0.9}) ):
## setup layers and initialize model weights and biases
        self.parent_caller_name = parent_caller_name ## for logging purposes
self.nnModel = nnModel
self.epochs = epochs
self.loss_func = loss_func
self.optimizer = optimizer
self.loss_thresh = 1e-6
## TODO: cuda calls
self.use_cuda = use_cuda
self.init_cuda()
### === sklearn pipeline management imple ===
def fit(self, X, y=None): ## X is NxF, y is Nx1
## train model + batches
self.train(X, y)
return self
def transform(self, X, y=None):## X is NxF, y is Nx1
## predict with no_grad and return list of predictions as is
return self.predict(X, y, log=False)
def score(self, X_, y_=None):## y is Nx1
yhat = self.predict(X_, y_)
        ACC_ = self.zscore(yhat, y_)  ## use the metric helper; calling self.score here would recurse infinitely
return ACC_
### === things pyTorch and cuda ==========
def zscore(self, yhat, y_):## y is Nx1
ACC_ = np.array( [ int( int(y) == int( yh ) ) for y, yh in zip(y_, yhat )]).mean()
ZReporter.add_log_entry( self.MSG_ACC_ON_PREDICT(self.parent_caller_name, len(y_), ACC_ ) )
return ACC_
def init_cuda(self):
if self.use_cuda:
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
print('Cuda is not available')
self.device = torch.device('cpu')
## TODO: returns + skl.metrics
def train(self, X_train, y_train=None):
N_ = len(X_train)
lossor = self.loss_func[0]( **self.loss_func[1])
optimor = self.optimizer[0]( self.nnModel.parameters(), **self.optimizer[1])
def fwd_pass(x, y=None, ebatch=0):
if (y is not None) and isinstance(y, np.ndarray): ## force tensor <<< TODO: fix at source
y = torch.tensor(y)
## 1. train on batch
self.nnModel.train() ## set to training mode
outz = self.nnModel.forward( x )
# print(outz)
# print(y)
# print( type(outz), type(y))
# print( f"****** SHAPEZ::: *****\n yhat ={outz[0].shape} y = {y[0].shape }")
l_ = lossor( outz, y)
optimor.zero_grad()
l_.backward()
optimor.step()
## 2. evaluate batch ##TODO: ensure batches
if ebatch % 10 == 0 or ebatch == self.epochs:
self.nnModel.eval()
outz = self.nnModel.forward(x)
l_ = lossor( outz, y)
return l_.item()
LOSS_ = 0.
for e in range( self.epochs):
if isinstance(X_train, DataLoader):
pass
else:
if y_train is None:
for x in X_train:
LOSS_ += fwd_pass(x, ebatch=e+1)
else:
for x, y in zip(X_train, y_train):
LOSS_ += fwd_pass(x, y, e+1)
ZReporter.add_log_entry( self.MSG_LOSS_PER_EPOCH(self.parent_caller_name, e+1, LOSS_ ,N_ ) )
## TODO: better flow and when to stop
if LOSS_ <= self.loss_thresh:
break
def predict(self, X_, y_=None, log=True, to_numpy=True):
## run evaluation and return list of results as is. Score will handle metrics
def eval_pass(x):
with torch.no_grad():
self.nnModel.eval()
outz = self.nnModel.forward(x)
o = torch.argmax( outz ).cpu()
return o.numpy() if to_numpy else o
yhat = []
if isinstance(X_, DataLoader):
pass
else:
for x in X_:
yhat.append( eval_pass(x) )
if log:
ACC_ = self.zscore(yhat, y_)
return yhat
class ZModelKMeans(ZModel):
def __init__(self):
pass
def train(self, X_train):
pass
def predict(self, X_):
pass
### --- TBD: Skorch == sklearn + pytorch already! for now i want to moi-flow
### --- TODO: Captum == Model intepretability for PyTorch
class ZTrainingManager():
MSG_GSEARCH_RESULTS = "[{:7s}] Best score = {:2.4f} estimator = {:10s} paramz = {:50s}".format ## SCORE, ESTIMATOR, PARAMZ
### === workflow management, permutations and hyperparam tuning configuration
def __init__(self, data_pipez=None, model_pipez=None): ## NO:ByNNs = setup layers and initialize model weights and biases
# workflow management, permutations and hyperparam tuning configuration
if data_pipez is not None and model_pipez is not None:
self.build_permutationz(data_pipez, model_pipez)
### === setup permutationz ===
def build_permutationz(self, data_pipez, model_pipez):
'''
data_pipez : list of data pipelines
model_pipez : list of (model pipelines and grid search params) tuples
'''
self.permutationz = [] ## reset
d_, m_ = np.meshgrid( range( len(data_pipez)), range(len(model_pipez)) )
for x, y in zip(d_, m_):
for i, j in zip(x, y):
self.permutationz.append( (data_pipez[i], model_pipez[j]) )
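        # Example (added, illustrative): with 2 data pipelines and 3 model pipelines the meshgrid yields
        # every 2 x 3 = 6 (data_pipe, model_pipe) pairing, so len(self.permutationz) == 6.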
### === run training with skl.grid search on each permutation
def run(self, data_X, data_y = None, train_test_split=1., save_best=False, log=False):
## for each permutation apply data and grid search
## 1. Deal data allocation TODO: when to train_test split
if isinstance(data_X, np.ndarray):
n_train = int(len(data_X)*train_test_split)
train_X, test_X = data_X[:n_train], data_X[n_train:] ## TODO: check if a ZPdData or something or array/list
train_y, test_y = [], [] ## hack for print :/
if data_y is not None:
train_y, test_y = data_y[:n_train], data_y[n_train:]
if log:
print( type(train_y[0]), train_y[0].shape )
if log:
print( type(train_X[0]), train_X[0].shape )
print(f"Train-Test-Split {train_test_split}: train = {len(train_X)}, {len(train_y)} \t test = {len(test_X)}, {len(test_y)}")
else: ## TODO: dataloader, Zdataset
train_X, train_y = data_X, data_y
test_X, test_y = [], []
## 2. train
O_ = []
for i in tqdm( range( len( self.permutationz ) ) ):
o = self._run_permutation(i, train_X, train_y )
p = f"Perm_{i+1}"
O_.append( [p,*o] )
# print("<<<<<<<\n", o, "\n>>>>>>>>")
ZReporter.add_log_entry( ZTrainingManager.MSG_GSEARCH_RESULTS(f"{p} {o[0]}", o[1], *[str(i) for i in o[2:]]) )
## 3. test/validate
return O_
def _run_permutation(self, idx, X, y , log=False):
data_pipe, model_pipe = self.permutationz[ idx]
model_pipe, g_paramz = model_pipe
def update_gsearch_param_keys(mp, gp):
O_ = {}
m = mp.steps[-1][0]
for k, v in gp.items():
O_[ f"model_pipe__{m}__{k}" ] = v
if log:
print(f"\n\n***********{m}***********")
return O_ , m
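        # Sketch of what the helper produces (illustrative names, added): if the final pipeline step is
        # ('zmodel', ZModel(...)) and gp = {'epochs': [3, 5]}, the grid becomes
        #   {'model_pipe__zmodel__epochs': [3, 5]}
        # which is sklearn's step__param addressing for parameters nested inside the outer Pipeline.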
g_paramz , m_name = update_gsearch_param_keys(model_pipe, g_paramz)
# print( data_pipe )
dz = "__".join([str(x[0]) for x in data_pipe.steps])
m_name = f"{m_name} {dz}"
piper = Pipeline([ ('data_pipe', data_pipe),
('model_pipe', model_pipe)])
# print(f"============\n{piper}\n{g_paramz}\n==============<<<<")
gsearch = GridSearchCV(estimator=piper,
param_grid=g_paramz,
cv=StratifiedKFold(n_splits=2), ## random_state=99, shuffle=True
n_jobs=1,
return_train_score=True)
gsearch.fit(X, y)
return (m_name, gsearch.best_score_, gsearch.best_estimator_ , gsearch.best_params_)
def _save_best_model(self): ## TODO
pass
if __name__ == "__main__":
from sklearn.utils.estimator_checks import check_estimator
# check_estimator( ZModel() ) ## check adheres to sklearn interface and standards
# TODO: parametrize_with_checks pytest decorator
epochz = 3
N, nf, nclasses = 12, 40, 2 #.reshape(1, -1)
tmpX = [ torch.tensor( np.random.randint(0, 100, size=nf).reshape(1, -1).astype(np.float32) ) for i in range(N)]
tmpY = [ x.sum()**2 for x in tmpX]
ymu = np.array(tmpY).mean() ##mean
ymu = np.percentile( np.array(tmpY), 0.5) ## median
    tmpY = [ torch.tensor( np.array([ int(y > ymu),] ).astype(np.int64) ) for y in tmpY] ## TODO: qcut percentile (np.int64 replaces np.long, which is gone in recent NumPy, and matches CrossEntropyLoss targets)
tmpX = tmpX * 300
tmpY = tmpY * 300
n_train = int(len(tmpY)*0.75)
mlp = nnarchs.ZNNArchitectureFactory.mlp(nf, nclasses)
print(mlp )
model = ZModel( "Tryzex", mlp, epochs=epochz,
loss_func=(nn.CrossEntropyLoss, {} ),
optimizer=(optim.SGD, {'lr':0.001, 'momentum':0.9} ) )
print( model )
model.train(tmpX[:n_train], tmpY[:n_train])
yhat = model.predict(tmpX[n_train:], tmpY[n_train:] , log=True)
# model.score(yhat, tmpY[n_train:])
c = "="
print(f"{c*10} End Model --> Dumping to File {c*10}\n")
fpath = "./tester.zmd"
model.dump(fpath )
model2 = ZModel("Tryzex_2",)
model2.load(fpath)
model2.predict(tmpX, tmpY , log=True)
model2.epochs =5
model2.fit(tmpX[:n_train], tmpY[:n_train])
yhat = model2.transform(tmpX[n_train:], tmpY[n_train:])
model2.zscore(yhat, tmpY[n_train:])
print( f"****** SHAPEZ::: *****\n yhat ={yhat[0].shape} y = {tmpY[0].shape }")
print(f"{c*10} End Model2 <-- Loaded from file, predicted, retrained and predicted {c*10}\n")
epochz | |
import serial.serialutil
import pygame, math, sys, uart, serial, threading, time, numpy
from scipy.optimize import curve_fit
from statistics import pstdev
# Constants
BLUE = (25, 25, 200)
PURPLE = (150, 25, 200)
BLACK = (23, 23, 23)
GREY = (56, 56, 56)
WHITE = (254, 254, 254)
RED = (200, 25, 25)
GREEN = (25, 200, 25)
CM_TO_PX = 1.724
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
move_avg = [14]
turn_avg = [70.12]
ir_cal = [1043, 745, 610, 487, 2191, 1530, 1169, 945, 778, 672, 583, 528, 466, 406, 381, 330, 298, 268, 248, 225, 236, 235, 196, 235, 196, 168, 144, 166, 120]
pg_cal = [16.12, 22.64, 27.04, 31.84, 7.79, 11.29, 14.86, 19.04, 23.14, 26.72, 28.85, 33.23, 36.98, 40.65, 44.12, 48.08, 51.49, 26.79, 33.26, 39.66, 66.9, 1000.0, 1000.0, 1000.0, 1000.0, 64.08, 8.82, 1000.0, 69.69]
configs = {
3: {'COEFF': 67040.0548282411, 'PWR': -1.0002365394867478},
9: {'COEFF': 86338.93790129754, 'PWR': -1.0514768371387075}
}
#Which Cybot we're using
CYBOT_NUM = 3
COEFF = configs[CYBOT_NUM]['COEFF']
PWR = configs[CYBOT_NUM]['PWR']
IR_RAW = 0
CliffData = []
def polar_to_cart(deg, amt):
"""Convert polar coordinates to cartesian"""
x = float(amt) * math.sin(math.radians(int(deg)))
y = float(amt) * math.cos(math.radians(int(deg)))
return x,y
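# Quick check of the convention above (added note): angles are measured from the +y axis, so
# polar_to_cart(90, 10) ~= (10.0, 0.0) and polar_to_cart(0, 10) ~= (0.0, 10.0).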
def line_of_best_fit():
"""Set formula for calculating ir values"""
def objective(x, a, b):
return a * pow(x, b)
global COEFF, PWR
popt, _ = curve_fit(objective, ir_cal, pg_cal)
COEFF, PWR = popt
print("COEFF = " + str(COEFF))
print("PWR = " + str(PWR))
def ir_to_cm(val):
"""Convert ir values into centimeters"""
return COEFF * pow(val,PWR)
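# Rough sanity check (added, using the CYBOT_NUM = 3 constants above): ir_to_cm(1000)
# ~= 67040.05 * 1000**-1.00024 ~= 67 cm, i.e. larger raw IR readings map to shorter distances.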
def get_dist(x1,x2,y1,y2):
"""Find distance between two points"""
return abs(math.sqrt(pow(x1-x2,2) + pow(y1-y2,2)))
def avg(list):
"""Find average value in a list or list-like object"""
return sum(list) / len(list)
class Player():
"""Cybot player class"""
def __init__(self):
self.x = SCREEN_WIDTH / 2
self.y = SCREEN_HEIGHT / 2
self.servo_pos = 0
self.rot = 90.0
self.size = 34.8 * CM_TO_PX
self.rect = pygame.Rect(self.x-30,self.y-30,60,60)
self.manual = True
self.lBump = ""
self.rBump = ""
self.estimating = False
def position(self, angle, dist):
"""Update position info"""
self.rot += angle
# movement handling
x, y = polar_to_cart(self.rot, dist * CM_TO_PX)
self.x += x
self.y += y
self.rect = pygame.Rect(self.x-(self.size/2),self.y-(self.size/2),self.size,self.size)
def bump(self, leftBump, rightBump):
"""Update bumper status on gui and draw"""
if leftBump == 1:
x, y = polar_to_cart(self.rot + 45, self.size / 1.5)
self.lBump = "left "
CliffData.append(Cliff(False, self.x + x, self.y + y, self.x, self.y, bump=True))
else: self.lBump = ""
if rightBump == 1:
x, y = polar_to_cart(self.rot - 45, self.size / 1.5)
self.rBump = "right "
CliffData.append(Cliff(False, self.x + x, self.y + y, self.x, self.y, bump=True))
else: self.rBump = ""
def cliff(self, cliffVal) :
"""Cliff handling"""
lx, ly = polar_to_cart(self.rot + 45, self.size / 1.5)
flx, fly = polar_to_cart(self.rot + 15, self.size / 1.5)
rx, ry = polar_to_cart(self.rot - 45, self.size / 1.5)
frx, fry = polar_to_cart(self.rot - 15, self.size / 1.5)
if cliffVal & 0b1:
# l high
CliffData.append(Cliff(False,self.x + lx,self.y + ly,self.x,self.y))
if cliffVal & 0b10:
# l low
CliffData.append(Cliff(True,self.x + lx,self.y + ly,self.x,self.y))
if cliffVal >> 2 & 0b1:
# lf high
CliffData.append(Cliff(False,self.x + flx,self.y + fly,self.x,self.y))
if cliffVal >> 2 & 0b10:
# lf low
CliffData.append(Cliff(True,self.x + flx,self.y + fly,self.x,self.y))
if cliffVal >> 4 & 0b1:
# r high
CliffData.append(Cliff(False,self.x + rx,self.y + ry,self.x,self.y))
if cliffVal >> 4 & 0b10:
# r low
CliffData.append(Cliff(True,self.x + rx,self.y + ry,self.x,self.y))
if cliffVal >> 8 & 0b1:
# rf high
CliffData.append(Cliff(False,self.x + frx,self.y + fry,self.x,self.y))
if cliffVal >> 8 & 0b10:
# rf low
CliffData.append(Cliff(True,self.x + frx,self.y + fry,self.x,self.y))
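        # Decoding note (added; the meaning of Cliff's first argument is taken from the branches above):
        # cliffVal packs the four sensors as 2-bit fields -- left at bits 0-1, front-left at bits 2-3,
        # right at bits 4-5 and front-right at bits 8-9 as written -- e.g. cliffVal = 0b10 records a
        # "low" reading on the left sensor, while cliffVal = 0b010000 records a "high" reading on the right.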
def calibrate_ir(self):
"""Auto-calibrate ir sensor w/ ping sensor"""
print("Calibrating IR sensors...")
while self.lBump == "" and self.rBump == "":
cybot_uart.send_data('w')
time.sleep(0.25)
cybot_uart.send_data('w') # stop moving
time.sleep(0.1)
for _ in range(20):
cybot_uart.send_data('s')
time.sleep(0.25)
cybot_uart.send_data('s')
cybot_uart.send_data('m')
time.sleep(0.5)
if ScanData[len(ScanData) - 1].pg[0] >= 500: continue
ir_cal.append(IR_RAW)
pg_cal.append(ScanData[len(ScanData) - 1].pg[0])
print("Complete!")
print("Recommend: Find line of best fit (f).")
def forward(self):
"""Move forward until key isn't pressed"""
self.estimating = True
cybot_uart.send_data('w')
while self.estimating: continue
cybot_uart.send_data('w')
def back(self):
"""Move backward until key isn't pressed."""
self.estimating = True
cybot_uart.send_data('s')
while self.estimating: continue
cybot_uart.send_data('s')
def left(self):
"""Turn left until key isn't pressed."""
self.estimating = True
cybot_uart.send_data('a')
while self.estimating: continue
cybot_uart.send_data('a')
def right(self):
"""Turn right until key isn't pressed/"""
self.estimating = True
cybot_uart.send_data('d')
while self.estimating: continue
cybot_uart.send_data('d')
def clear(self):
self.x = SCREEN_WIDTH / 2
self.y = SCREEN_HEIGHT / 2
self.rot = 0
self.rect = pygame.Rect(self.x-30,self.y-30,60,60)
ScanData.clear()
def scan(self,theta,ir,pg):
"""Process results of single scan point"""
global IR_RAW
self.servo_pos = theta
IR_RAW = int(ir) # for calibration
ir = ir_to_cm(ir)
irx, iry = polar_to_cart(int(self.servo_pos) - 90 + self.rot, ir * CM_TO_PX)
pgx, pgy = polar_to_cart(int(self.servo_pos) - 90 + self.rot, pg * CM_TO_PX)
offsetx, offsety = polar_to_cart(self.rot, float(34.8 / 2) * CM_TO_PX)
#Cutoff for IR sensor, in cm
if ir < 65:
ScanData.append(Point(self.x+irx+offsetx, self.y+iry+offsety, self.x+pgx+offsetx, self.y+pgy+offsety,ir,pg))
#Average IR and Ping for more accurate results
avg_x = (self.x+pgx+offsetx + self.x+irx+offsetx) / 2
avg_y = (self.y+pgy+offsety + self.y+iry+offsety) / 2
#Add obstacle to grid at calculated coordinates
obstacle_grid[avg_x][avg_y] = Obstacle(self.x+irx+offsetx, self.y+iry+offsety, avg_x, avg_y)
print(obstacle_grid)
class Point():
"""Class to hold scan data"""
def __init__(self,irx,iry,pgx,pgy,ir,pg):
self.ir = [ir,[irx,iry]]
self.pg = [pg,[pgx,pgy]]
class Obstacle():
"""
This represents an obstacle in the field.
Contains a PointCloud object holding all points determined to be a part of this object.
"""
def __init__(self, ix, iy, px, py):
self.ix = ix
self.iy = iy
self.px = px
self.py = py
self.points = PointCloud((px, py))
def __str__(self):
return "Obstacle(" + str(self.px) + ", " + str(self.py) + ", irx: " + str(self.ix) + ", iry: " + str(self.iy) + ")"
def __repr__(self):
return self.__str__()
class PointCloud(list):
"""
Represents a point cloud that defines an obstacle. Dynamically calculates centerpoint,
and automatically rejects outlier points using calculated Z-score.
"""
def __init__(self, center):
super().__init__()
self.least_point = None
self.most_point = None
self.center = center
def append(self, item):
"""
All the magic happens here. Calculates Z-score for point being appended, automatically
rejects if outside bounds. If within bounds, appends and recalculates centerpoint.
"""
distance = math.hypot(item[0] - self.center[0], item[1] - self.center[1])
if(len(self) > 1):
x_stdev = pstdev([a[0] for a in self])
y_stdev = pstdev([a[1] for a in self])
if(x_stdev != 0 and y_stdev != 0):
x_score = (item[0] - self.center[0]) / x_stdev  # Z-score along x
y_score = (item[1] - self.center[1]) / y_stdev  # Z-score along y
print(x_score)
print(y_score)
if(x_score > 8 or y_score > 8):
return
super().append(item)
#Update centerpoint
x = [p[0] for p in self]
y = [p[1] for p in self]
self.center = (sum(x) / len(self), sum(y) / len(self))  # recompute the centroid (mean) of the cloud
#Update points used for diameter calculations.
if self.least_point == None or (item[0] < self.least_point[0] and item[1] < self.least_point[1]):
self.least_point = item
if self.most_point == None or (item[0] > self.most_point[0] and item[1] > self.most_point[1]):
self.most_point = item
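# --- Illustrative sketch (assumed usage, not from the original file; relies on
# the module's own imports such as statistics.pstdev): nearby points are
# accepted and shift the centre, while a far outlier is rejected by the
# Z-score check in PointCloud.append().
def _demo_point_cloud():
    cloud = PointCloud(center=(100.0, 100.0))
    for p in [(101.0, 99.0), (99.5, 100.5), (100.5, 101.0)]:
        cloud.append(p)              # accepted; centre drifts toward the mean
    cloud.append((5000.0, 5000.0))   # Z-score far above the threshold; rejected
    print(len(cloud), cloud.center)  # 3 points kept, centre stays near (100, 100)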
class Grid():
'''
Class for transparently working on the grid.
Acts similarly to a 2D list. If grid[a] does not exist, the inner list is automatically created.
Index into the grid like so: grid[x][y].
If no obstacle exists at (x,y), returns None.
When adding to inner list, first looks at locations within near_threshold to see if obstacle already exists.
'''
def __init__(self, near_threshold=5, outer=True, container=None):
self.grid_dict = {}
self.near_threshold = near_threshold
self.outer = outer
self.container = container
self.points = []
def get_obstacles(self):
obstacles = []
for row in self.grid_dict.values():
for obst in row.grid_dict.values():
obstacles.append(obst)
return obstacles
def clear(self):
del self.grid_dict
self.grid_dict = {}
def __getitem__(self, key):
"""
If key does not exist and is outer, create inner Grid() at key.
If key does not exist and is inner, return None.
"""
key = int(key)
try:
return self.grid_dict[key]
except KeyError:
if(self.outer):
self.grid_dict[key] = Grid(near_threshold=self.near_threshold,outer=False, container = self)
return self.grid_dict[key]
return None
def __setitem__(self, key, newval):
"""
Looks at values near key to determine if an obstacle is nearby, if
so, then add to that obstacle's point cloud instead of adding to grid.
Otherwise, add to grid.
"""
if(not self.outer):
x = int(newval.px)
y = int(newval.py)
for i in range(int(x - self.near_threshold), int(x + self.near_threshold)):
for j in range(int(y - self.near_threshold), int(y + self.near_threshold)):
if self.container[i][j] != None:
self.container[i][j].points.append((newval.px, newval.py))
return
#If no near obstacle found, add to grid
self.grid_dict[int(key)] = newval
def __str__(self):
return str(self.grid_dict)
def __repr__(self):
return self.__str__()
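# --- Illustrative sketch (assumed usage, not from the original file): the Grid
# above creates inner rows on first access, and a point set within
# near_threshold cells of an existing Obstacle is merged into that obstacle's
# point cloud instead of becoming a new grid entry.
def _demo_obstacle_grid():
    grid = Grid(near_threshold=5)
    first = Obstacle(10.0, 20.0, 10.0, 20.0)
    grid[10][20] = first       # empty neighbourhood: stored in the grid
    nearby = Obstacle(12.0, 21.0, 12.0, 21.0)
    grid[12][21] = nearby      # within 5 cells of `first`: appended to first.points
    print(grid[10][20] is first, len(first.points))  # True 1
    print(grid.get_obstacles())                      # [Obstacle(10.0, 20.0, ...)]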
NT 5.1; .NET CLR 1.1.4322)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))')
uagent.append('Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)')
uagent.append('Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8')
uagent.append('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)')
uagent.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15')
uagent.append('Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57')
uagent.append('Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
uagent.append('Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0')
uagent.append('Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g')
uagent.append('Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)')
uagent.append('Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125')
uagent.append('Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)')
uagent.append('Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)')
uagent.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
uagent.append('Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)')
uagent.append('Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)')
uagent.append('Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10')
uagent.append('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0')
uagent.append('Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10')
uagent.append('Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)')
uagent.append('Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)')
uagent.append('Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)')
uagent.append('Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/uagent)')
uagent.append('Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16')
uagent.append('Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
uagent.append('Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)')
uagent.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')
uagent.append('Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)')
uagent.append('Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7')
uagent.append('BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0')
uagent.append('Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)')
uagent.append('Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)')
uagent.append('Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)')
uagent.append('Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)')
uagent.append('Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007')
uagent.append('BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179')
uagent.append('Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)')
uagent.append('Googlebot/2.1 (http://www.googlebot.com/bot.html)')
uagent.append('Opera/9.20 (Windows NT 6.0; U; en)')
uagent.append('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)')
uagent.append('Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)')
uagent.append('Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16')
uagent.append('Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13')
uagent.append('Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)')
uagent.append('Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)')
uagent.append('Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7')
uagent.append('Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)')
uagent.append('Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)')
uagent.append('YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)')
uagent.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')
uagent.append('AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)')
uagent.append('Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)')
uagent.append('Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)')
uagent.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)')
uagent.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E')
uagent.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
uagent.append('Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15')
uagent.append('Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57')
uagent.append('Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
uagent.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
uagent.append('Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0')
uagent.append('Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g')
uagent.append('Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)')
uagent.append('Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125')
uagent.append('Mozilla/5.0 (X11; U; Linux i686
if not name and not rule_id:
raise SDKException(
'OperationWindow',
'102',
'Either Name or Rule Id is needed')
if name and not isinstance(name, str) or rule_id and not isinstance(rule_id, int):
raise SDKException('OperationWindow', '106')
if name:
rule_id = self.get(name=name).rule_id
flag, response = self._cvpysdk_object.make_request(
'DELETE', self._operation_window + '/' + str(rule_id))
if flag:
if response.json():
error_code = response.json().get("error", {}).get('errorCode')
if int(error_code):
raise SDKException('OperationWindow', '103')
else:
raise SDKException('Response', '102')
else:
response_string = self._update_response(
response.text)
raise SDKException('Response', '102', response_string)
def list_operation_window(self):
"""Lists the operation rules for the associated commcell entity.
Returns:
Returns the List of operation rules (dictionary) associated with given commcell entity
Example --
[{'ruleEnabled': True,
'endDate': 0,
'level': 0,
'name': 'Rule1',
'ruleId': 1,
'startDate': 0,
'operations': ['FULL_DATA_MANAGEMENT', 'NON_FULL_DATA_MANAGEMENT'],
'company': {'_type_': 61,
'providerId': 0,
'providerDomainName': ''},
'dayTime': [{'startTime': 28800,
'endTime': 64800,
'dayOfWeek': ['sunday','monday']}]}
]
Raises:
SDKException:
if the Operation rules could not be Listed
if response is empty
if response is not success
"""
flag, response = self._cvpysdk_object.make_request(
'GET', self.connect_string)
if flag:
if response.json():
error_code = response.json().get("error", {}).get('errorCode')
if int(error_code) == 0:
list_of_rules = response.json().get("operationWindow")
operation_reverse_mapping = {value:key for key, value in OPERATION_MAPPING.items()}
if list_of_rules is not None:
for operation_rule in list_of_rules:
operations = operation_rule.get("operations")
day_of_week = operation_rule.get("dayTime", [{}])[0].get("dayOfWeek")
if operations is not None:
operation_rule["operations"] = [operation_reverse_mapping[operation]
for operation in operations]
if day_of_week is not None:
operation_rule["dayTime"][0]["dayOfWeek"] = [DAY_OF_WEEK_MAPPING[day]
for day in day_of_week]
return list_of_rules
raise SDKException('OperationWindow', '104')
raise SDKException('Response', '102')
response_string = self._update_response(response.text)
raise SDKException('Response', '102', response_string)
def get(self, rule_id=None, name=None):
"""Returns the operation rule object for a given rule
Args:
rule_id (int) -- Rule Id of an operation Window
name (str) -- Name of the operation window
Returns:
object - instance of the OperationWindowDetails class
for the given operation window name/rule
Raises:
SDKException:
if type of the operation window name argument is not string
if no operation window exists with such name
"""
if not name and not rule_id:
raise SDKException(
'OperationWindow',
'102',
'Either Name or Rule Id is needed')
if name and not isinstance(name, str) or rule_id and not isinstance(rule_id, int):
raise SDKException('OperationWindow', '106')
list_of_rules = self.list_operation_window()
if rule_id:
for operation_rule in list_of_rules:
if operation_rule.get("ruleId") == rule_id:
return OperationWindowDetails(self.generic_entity_obj, rule_id, self.entity_details)
raise Exception("No such operation window with rule id as {0} exists".format(rule_id))
if name:
rules = [operation_rule.get("ruleId") for operation_rule in list_of_rules
if operation_rule.get("name") == name]
if not rules:
raise Exception("No such operation window with name as {0} exists".format(name))
if len(rules) == 1:
return OperationWindowDetails(self.generic_entity_obj, rules[0], self.entity_details)
raise Exception("More than one operation window are named as {0} exists".format(name))
class OperationWindowDetails:
"""Helper class for modifying operation window"""
def __init__(self, generic_entity_obj, rule_id, entity_details):
"""
Initialize the OperationWindowDetails class instance for
modifying OperationWindow.
Args:
generic_entity_obj (object) -- Commcell entity object
Expected value : commcell/Client/Agent/Instance/BackupSet/Subclient/Clientgroup Entity
rule_id (int) -- Rule id of the operation window to be modified
entity_details -- Details related to the entity
Usually gets initialized from OperationWindow class
Returns:
object - instance of the OperationWindowDetails class
"""
from .commcell import Commcell
if isinstance(generic_entity_obj, Commcell):
self._commcell_object = generic_entity_obj
else:
self._commcell_object = generic_entity_obj._commcell_object
self._cvpysdk_object = self._commcell_object._cvpysdk_object
self._update_response = self._commcell_object._update_response_
self._commcell_services = self._commcell_object._services
self._operation_window = self._commcell_services['OPERATION_WINDOW']
self._rule_id = rule_id
self._name = None
self._start_date = None
self._end_date = None
self._operations = None
self._day_of_week = None
self._start_time = None
self._end_time = None
self._commcell_id = self._commcell_object.commcell_id
self._clientgroup_id = entity_details["clientGroupId"]
self._client_id = entity_details["clientId"]
self._agent_id = entity_details["applicationId"]
self._instance_id = entity_details["instanceId"]
self._backupset_id = entity_details["backupsetId"]
self._subclient_id = entity_details["subclientId"]
self._entity_level = entity_details["entity_level"]
self._refresh()
def modify_operation_window(self, **modify_options):
"""Modifies the Operation rule.
Args:
modify_options(dict) -- Arbitrary keyword arguments.
modify_options Args:
name (str) -- Name of the Operation rule
start_date (int) -- The start date for the operation rule.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
default - current date
end_date (int) -- The end date for the operation rule.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
default - 365 days
operations (list) -- List of operations for which the operation
window is created
Acceptable Values:
FULL_DATA_MANAGEMENT/NON_FULL_DATA_MANAGEMENT/SYNTHETIC_FULL/
DATA_RECOVERY/AUX_COPY/DR_BACKUP/DATA_VERIFICATION/ERASE_SPARE_MEDIA/
SHELF_MANAGEMENT/DELETE_DATA_BY_BROWSING/DELETE_ARCHIVED_DATA/
OFFLINE_CONTENT_INDEXING/ONLINE_CONTENT_INDEXING/SRM/INFORMATION_MANAGEMENT/
MEDIA_REFRESHING/DATA_ANALYTICS/DATA_PRUNING/BACKUP_COPY/CLEANUP_OPERATION
day_of_week (list) -- List of days of the week on which the operation rule applies to
Acceptable Values:
sunday/ monday/ tuesday/ wednesday/ thursday/ friday/ saturday
default- Weekdays
start_time (int) -- The start time for the "do not run" interval.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
default - 28800 (8 AM)
end_time (int) -- The end time for the "do not run" interval.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
default - 86400 (6 PM)
Raises:
SDKException:
if the Operation window could not be Modified
if response is empty
if response is not success
"""
start_date = modify_options.get("start_date", self.start_date)
end_date = modify_options.get("end_date", self.end_date)
start_time = modify_options.get("start_time", self.start_time)
end_time = modify_options.get("end_time", self.end_time)
name = modify_options.get("name", self.name)
operations = modify_options.get("operations", self.operations)
day_of_week = modify_options.get("day_of_week", self.day_of_week)
if not operations:
# Empty list can be passed
operations_list = operations
else:
operations_list = [OPERATION_MAPPING[operation.upper()] for operation in operations]
day_of_week_list = [DAY_OF_WEEK_MAPPING.index(day.lower()) for day in day_of_week]
payload = {
"operationWindow": {
"ruleEnabled": True,
"startDate": start_date,
"endDate": end_date,
"name": name,
"ruleId": int(self.rule_id),
"operations": operations_list,
"dayTime": [{
"startTime": start_time,
"endTime": end_time,
"dayOfWeek": day_of_week_list
}]
},
"entity": {
"clientGroupId": int(self._clientgroup_id),
"clientId": int(self._client_id),
"applicationId": int(self._agent_id),
"instanceId": int(self._instance_id),
"backupsetId": int(self._backupset_id),
"subclientId": int(self._subclient_id)
}
}
flag, response = self._cvpysdk_object.make_request(
'PUT', self._operation_window, payload=payload)
if flag:
if response.json():
error_code = response.json().get("error", {}).get('errorCode')
if int(error_code) == 0:
int(response.json().get('operationWindow', {}).get('ruleId'))
self._refresh()
else:
raise SDKException('OperationWindow', '105')
else:
raise SDKException('Response', '102')
else:
response_string = self._update_response(response.text)
raise SDKException('Response', '101', response_string)
def _refresh(self):
"""Refreshes the properties of a rule"""
self._get_rule_properties()
def _get_rule_properties(self):
"""
Assigns the properties of an operation rule by getting the rule using rule id
"""
xml = "<Api_GetOperationWindowReq><ruleId>" + str(self.rule_id) + "</ruleId></Api_GetOperationWindowReq>"
response_json = self._commcell_object._qoperation_execute(xml)
if response_json:
error_code = response_json.get("error", {}).get('errorCode')
if int(error_code) == 0:
response_json = response_json.get('operationWindow', {})[0]
self._name = response_json.get('name')
self._start_date = response_json.get('startDate')
self._end_date = response_json.get('endDate')
operations = response_json.get('operations')
operation_reverse_mapping = {value:key for key, value in OPERATION_MAPPING.items()}
self._operations = [operation_reverse_mapping[operation] for operation in operations]
day_of_week = response_json.get('dayTime', [{}])[0].get('dayOfWeek')
self._day_of_week = [DAY_OF_WEEK_MAPPING[day] for day in day_of_week]
self._start_time = response_json.get('dayTime', [{}])[0].get('startTime')
self._end_time = response_json.get('dayTime', [{}])[0].get('endTime')
else:
raise SDKException('OperationWindow', '102',
response_json.get("error", {}).get('errorMessage'))
else:
raise SDKException('Response', '102')
@property
def name(self):
"""Treats name as a read-only attribute."""
return self._name
@name.setter
def name(self, name):
"""
Modifies the name of the operation rule
Args:
name: (str) --Name of the operation rule to be modified
"""
self.modify_operation_window(name=name)
@property
def start_date(self):
"""Treats start_date as a read-only attribute."""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""
Modifies the start_date of the operation rule
Args:
start_date: (int) --The start date for the operation rule.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
Returns: None
"""
self.modify_operation_window(start_date=start_date)
@property
def end_date(self):
"""Treats end_date as a read-only attribute."""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""
Modifies the end_date of the operation rule
Args:
end_date: (int) -- The end date for the operation rule.
Valid values are UNIX-style timestamps (seconds since January 1, 1970).
Returns: None
"""
self.modify_operation_window(end_date=end_date)
@property
def operations(self):
"""Treats opearations as a read-only attribute."""
return self._operations
@operations.setter
def operations(self, operations):
"""
Modifies the operations of the operation rule
Args:
operations: (list) -- List of operations for which the operation
window is created
Acceptable Values:
FULL_DATA_MANAGEMENT/NON_FULL_DATA_MANAGEMENT/SYNTHETIC_FULL/
DATA_RECOVERY/AUX_COPY/DR_BACKUP/DATA_VERIFICATION/ERASE_SPARE_MEDIA/
SHELF_MANAGEMENT/DELETE_DATA_BY_BROWSING/DELETE_ARCHIVED_DATA/
OFFLINE_CONTENT_INDEXING/ONLINE_CONTENT_INDEXING/SRM/INFORMATION_MANAGEMENT/
MEDIA_REFRESHING/DATA_ANALYTICS/DATA_PRUNING/BACKUP_COPY/CLEANUP_OPERATION
Returns: None
"""
self.modify_operation_window(operations=operations)
@property
def day_of_week(self):
"""Treats day_of_week as a read-only attribute."""
return self._day_of_week
@day_of_week.setter
def day_of_week(self, day_of_week):
"""
Modifies the day_of_week of the operation rule
Args:
day_of_week: (list) -- List of days of the week on which the operation rule applies to
Acceptable Values:
sunday/ monday/ tuesday/ wednesday/ thursday/ friday/ saturday
Returns: None
"""
self.modify_operation_window(day_of_week=day_of_week)
@property
def start_time(self):
"""Treats start_time as a read-only attribute."""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""
Modifies the start_time of the operation rule
Args:
start_time: (int) -- The start time for the "do not run" interval.
Valid values are seconds since the start of the day (e.g. 28800 = 8 AM).
Returns: None
"""
self.modify_operation_window(start_time=start_time)
@property
def end_time(self):
"""Treats end_time as a read-only attribute."""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""
| |
<filename>kamodo/readers/tiegcm.py
import numpy as np
from kamodo import Kamodo, kamodofy, gridify
import time
from scipy.interpolate import RegularGridInterpolator, interp1d
from datetime import datetime,timedelta
# pip install pytiegcm
from tiegcm.tiegcm import TIEGCM
# 'UN', 'VN', 'O1', 'NO', 'N4S', 'HE', 'NE', 'TE', 'TI', 'TEC', 'O2', 'O2P_ELD', 'OMEGA', 'POTEN', 'UI_ExB', 'VI_ExB', 'WI_ExB', 'OP', 'N2P_ELD', 'NPLUS', 'NOP_ELD', 'SIGMA_PED', 'SIGMA_HAL', 'DEN', 'QJOULE', 'Z', 'ZG', 'O_N2', 'QJOULE_INTEG', 'EFLUX', 'HMF2', 'NMF2', 'N2D_ELD', 'O2N', 'N2N', 'ZMAG', 'TLBC', 'ULBC', 'VLBC', 'TLBC_NM', 'ULBC_NM', 'VLBC_NM', 'LBC', 'latitude', 'longitude'
# constants and dictionaries
def totalseconds_to_datetime(seconds):
date0=datetime(1970,1,1,0,0,0)
date1=date0+timedelta(seconds=seconds)
return(date1)
def seconds_from_1970(_datetime):
datetime0=datetime(1970,1,1,0,0,0)
_timedelta=_datetime-datetime0
return(_timedelta.total_seconds())
def parse_units(varname, variable):
"""Parses units based on variable input"""
try:
units = variable.units
units = units.replace('-','**-')
units = units.replace('mmr','1').replace('S', 'ohm**-1')
units = units.replace('cm3','cm**3').replace('erg/g/s', 'erg/(g*s)')
units = units.replace('none','1')
except:
units = ''
if varname == 'Z':
units = 'cm'
return units
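# --- Illustrative sketch: what parse_units() does to a few unit strings of the
# kind found in TIEGCM netCDF attributes (the tiny stand-in class below is only
# for demonstration; the real input is a netCDF variable object).
def _demo_parse_units():
    class _FakeVar:
        def __init__(self, units):
            self.units = units
    print(parse_units('TN', _FakeVar('K')))            # -> 'K'
    print(parse_units('DEN', _FakeVar('g/cm3')))       # -> 'g/cm**3'
    print(parse_units('QJOULE', _FakeVar('erg/g/s')))  # -> 'erg/(g*s)'
    print(parse_units('Z', _FakeVar('km')))            # -> 'cm' (forced for Z)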
def sort_variables(self, variables):
variables_3d = []
variables_4d = []
for varname in variables:
try:
varshape = self._tiegcm.rootgrp.variables[varname].shape
except:
continue
if len(varshape) == 4:
variables_4d.append(varname)
elif len(varshape) == 3:
variables_3d.append(varname)
return variables_4d + variables_3d
class TIEGCM_Kamodo(Kamodo):
def __init__(self, filename,
variables_requested = None,
date = None,
time = None,
debug= False,
runpath = "./",
runname = "noname",
**kwargs):
self._tiegcm = TIEGCM(filename)
self._ilev = np.array(self._tiegcm.rootgrp.variables['ilev'])
self._time = np.array(self._tiegcm.rootgrp.variables['time'])
self._year=np.array(self._tiegcm.rootgrp.variables['year'])
self._day=np.array(self._tiegcm.rootgrp.variables['day'])
self._mtime=np.array(self._tiegcm.rootgrp.variables['mtime'])
_seconds=[]
for i in range(0,len(self._year)):
year=self._year[i]
day,hour,minute=self._mtime[i]
_datetime=datetime(year,1,1,int(hour),int(minute),0)+timedelta(days=int(day)-1)
_seconds.append(seconds_from_1970(_datetime))
self._time=np.array(_seconds)
self._lat = self._tiegcm.lat
self._lon = self._tiegcm.lon
self._registered = 0
self.time_of_day=self._time[0]
self.datetime=totalseconds_to_datetime(self.time_of_day)
super(TIEGCM_Kamodo, self).__init__()
print('opening {}'.format(filename))
# needed by CCMC online visualization based on Kamodo
self.filename = filename
self.runpath = runpath
self.runname = runname
self.missing_value = np.nan
self.variables=dict()
# Interpolation values
self.plots = dict()
self.plottype = "LonLat"
self.cut = 'IP'
self.cutunit = '[]'
self.cutV = 0. # within all the coordinate ranges
self.nT = 1
self.nX = 1
self.nY = 37
self.nZ = 73
self.newt = 1440.
self.newx = self.cutV
self.newy = np.linspace(-90., 90., self.nY)
self.newz = np.linspace(-180., 180., self.nZ)
self.xname="IP"
self.xunit=''
self.yname="Lat"
self.yunit='deg'
self.zname="Lon"
self.zunit='deg'
self.lonrange=dict(min=self._lon.min(),max=self._lon.max(),n=len(self._lon))
self.latrange=dict(min=self._lat.min(),max=self._lat.max(),n=len(self._lat))
self.hrange=dict(min=80,max=450,n=34)
self._H=np.linspace(self.hrange['min'],self.hrange['max'],self.hrange['n'])
self.tol = 1.1
self.plots[self.plottype] = dict(cut=self.cut, cutV=self.cutV, tol=self.tol,
nT=self.nT,nX=self.nX, nY=self.nY, nZ=self.nZ,
newt=self.newt,
newx=self.newx,
newy=self.newy,
newz=self.newz)
self.gridSize=len(self._ilev)*len(self._lat)*len(self._lon)
self.gridMinDx=np.asarray([self._lon[1]-self._lon[0],self._lat[1]-self._lat[0]]).min()
# Get grids ready to use
self.setup_interpolating_grids()
if variables_requested is None:
variables = self._tiegcm.rootgrp.variables.keys()
else:
variables = variables_requested
variables = sort_variables(self, variables)
for varname in variables:
units = parse_units(varname, self._tiegcm.rootgrp.variables[varname])
try:
self._tiegcm.set_variable_boundary_condition(varname)
except:
# print('can not set boundary condition for {}, skipping..'.format(varname))
continue
self._tiegcm.wrap_variable(varname)
variable = self._tiegcm.rootgrp.variables[varname]
if len(variable.shape) not in [3,4]:
# skip this variable
continue
if varname == 'ZMAG':
continue
elif len(variable.shape) == 4:
self.register_4d_variable(units, variable, varname)
elif len(variable.shape) == 3:
self.register_3d_variable(units, variable, varname)
self.variables[varname]=dict(data=variable,units=units)
print('registered {} variables'.format(self._registered))
# register user's input variables, assuming kamodo-compatible
# for varname, variable in kwargs.items():
# self[varname] = variable
# def ilev(points):
# return self.lev(*points)
#
# self['ilev'] = ilev
# vertical interpolation as is done in CCMC-Vis (3DView) IDL code
@np.vectorize
# can't do gridify here unless 'self' is somehow known
# @gridify(t = self._time, z=self._H, lat = self._lat, lon = self._lon)
def vert_interp(self,t, z, lat, lon):
# 1) meshgrid and squeeze the shapes of time, lat, lon if they are not the same
# 2) calculate z_levels for all points
# 3) construct rgi for (time, z_level, lat, lon)
# 4) interpolate over z
varname=self.varname
z_levels=np.squeeze(self['Z'](t=t,lat=lat,lon=lon))
data_levels=np.squeeze(self[varname](t=t,lat=lat,lon=lon))
interp = interp1d(z_levels, data_levels, bounds_error=False)
return interp(z)
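# --- Illustrative sketch (synthetic numbers, standalone): the same two-step
# idea used by vert_interp()/lev(): take the geometric heights of the model's
# pressure levels in one (t, lat, lon) column, then interpolate along that
# column to sample a variable at an arbitrary height.
def _demo_vertical_interpolation():
    import numpy as np
    from scipy.interpolate import interp1d
    z_levels = np.array([100., 150., 220., 300., 400.]) * 1e5  # level heights [cm]
    temperature = np.array([180., 350., 600., 800., 950.])     # value on each level
    to_height = interp1d(z_levels, temperature, bounds_error=False)
    print(to_height(250. * 1e5))  # value at ~250 km, between the 3rd and 4th level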
def register_3d_variable(self, units, variable, varname):
"""Registers a 3d interpolator with 3d signature"""
rgi = RegularGridInterpolator((self._time, self._lat, self._lon),
variable, bounds_error = False)
@kamodofy(units = units, data = variable)
@gridify(t = self._time, lat = self._lat, lon = self._lon)
def interpolator(xvec):
"""Interpolates 3d variable"""
return rgi(xvec)
self[varname] = interpolator
self._registered += 1
def register_4d_variable(self, units, variable, varname):
"""Registers a 4d interpolator with 4d signature"""
rgi = RegularGridInterpolator((self._time, self._ilev, self._lat, self._lon),
variable, bounds_error = False)
@kamodofy(units = units, data = variable)
@gridify(t = self._time, ilev = self._ilev, lat = self._lat, lon = self._lon)
# @np.vectorize
def interpolator(xvec):
"""Interpolates 4d variable"""
return rgi(xvec)
self.variables[varname] = dict(data=variable, units=units, interpolator=rgi)
self[varname] = interpolator
self._registered += 1
@np.vectorize
def lev(self, t, z, lat, lon):
"""Finds ilev for a given height"""
# 2) calculate z_levels for all points
# 3) construct rgi for (time, z_level, lat, lon)
# 4) interpolate over z
z_levels = np.squeeze(self.Z(t = t, lat = lat, lon = lon)) # ilev has a default
level = interp1d(z_levels, self._ilev, bounds_error=False)
return level(z)
def set_plot(self,
plottype = "XY",
time_in_day="12:00:00",
date=None,
cutV = 0,
lonrange=dict(min=-180,max=180,n=73), # reference to self._lon_density not possible?
latrange=dict(min=-90,max=90,n=37),
hrange=dict(min=1,max=15,n=15) ):
'''Set plotting variables for available preset plot types: XY, YZ, YZ, XYZ'''
if 'min' not in lonrange.keys():
lonrange['min']=self.lonrange['min']
if 'max' not in lonrange.keys():
lonrange['max']=self.lonrange['max']
if 'n' not in lonrange.keys():
lonrange['n']=self.lonrange['n']
if 'min' not in latrange.keys():
latrange['min']=self.latrange['min']
if 'max' not in latrange.keys():
latrange['max']=self.latrange['max']
if 'n' not in latrange.keys():
latrange['n']=self.latrange['n']
if 'min' not in hrange.keys():
hrange['min']=self.hrange['min']
if 'max' not in hrange.keys():
hrange['max']=self.hrange['max']
if 'n' not in hrange.keys():
hrange['n']=self.hrange['n']
tic = time.perf_counter()
if plottype == self.plottype:
if cutV == self.cutV:
if lonrange['min'] == self.lonrange['min'] and lonrange['max'] == self.lonrange['max'] and lonrange['n'] == self.lonrange['n']:
if latrange['min'] == self.latrange['min'] and latrange['max'] == self.latrange['max'] and latrange['n'] == self.latrange['n']:
if hrange['min'] == self.hrange['min'] and hrange['max'] == self.hrange['max'] and hrange['n'] == self.hrange['n']:
print('Plottype (',plottype,') and cut value (',cutV,') are unchanged, returning.')
return
self.lonrange=lonrange
self.latrange=latrange
self.hrange=hrange
ip_max=70
h_km_max=7000
h_m_max=7e6
if date is None or time_in_day is None:
self.time_of_day=self._time[0]
self.datetime=totalseconds_to_datetime(self.time_of_day)
else:
self.plotdate=date
self.plottime=time_in_day
datetime0=datetime.strptime(date,"%Y/%m/%d")
datetime1=datetime.strptime(date+" "+time_in_day,"%Y/%m/%d %H:%M:%S")
self.datetime=datetime1
# self.time_of_day=(datetime1-datetime0).total_seconds()/60.
self.time_of_day=seconds_from_1970(datetime1)
print("datetime: ",self.datetime)
self.filetime=self.datetime.strftime("%Y/%m/%d %H:%M:%S")
self.date=self.datetime.strftime("%Y/%m/%d")
self.plottime=self.datetime.strftime("%H:%M:%S")
# time selection
self.nT = 1
# self.newt = self.plottime
self.newt = self.time_of_day
# X=IP or Height
# Y=Lat
# Z=Lon
if plottype == "LonLat" or plottype == "LonLatH":
# auto-detect whether we are using IP or H (or H in cm or km)
if cutV < ip_max:
self.cut="IP"
self.cutunit=" []"
self.cutfactor=1.
else:
self.cut="H"
if cutV < h_km_max:
self.cutunit=" [km]"
self.cutfactor=1e5
elif cutV < h_m_max:
self.cutunit=" [m]"
self.cutfactor=100.
else:
self.cutunit=" [cm]"
self.cutfactor=1.
self.plottype = plottype
self.cutV = cutV
self.nX = 1
self.nY = latrange['n']
self.nZ = lonrange['n']
self.newx = cutV*self.cutfactor
self.newy = np.linspace(latrange['min'],latrange['max'],latrange['n'])
self.newz = np.linspace(lonrange['min'],lonrange['max'],lonrange['n'])
else:
self.cutunit="[deg]" #both for Lon=const (LatIP, LatH) and Lat=const (LonIP, LonH)
# auto-detect whether we are using IP or H (or H in cm or km)
if hrange['max'] < ip_max:
self.xname="IP"
self.xunit="[]"
self.xfactor=1.
self.xformat=".4f"
else:
self.xname="H"
self.xformat=".4g"
if hrange['max'] < h_km_max:
self.xunit=" [km]"
self.xfactor=1e5
elif hrange['max'] < h_m_max:
self.xunit=" [m]"
self.xfactor=100.
else:
self.xunit="[cm]"
self.xfactor=1.
self.plottype = plottype
if plottype == "LonIP" or plottype == "LonH":
self.cut = 'Lat'
self.cutV = cutV
self.nX = hrange['n']
self.nY = 1
self.nZ = lonrange['n']
self.newx=np.linspace(hrange['min'],hrange['max'],hrange['n'])*self.xfactor
self.newy = cutV
self.newz=np.linspace(lonrange['min'],lonrange['max'],lonrange['n'])
elif plottype == "LatIP" or plottype == "LatH":
self.plottype = plottype
self.cut = 'Lon'
self.cutV = cutV
self.nX=hrange['n']
self.nY=latrange['n']
self.nZ=1
self.newx = np.linspace(hrange['min'],hrange['max'],hrange['n'])*self.xfactor
self.newy = np.linspace(latrange['min'],latrange['max'],latrange['n'])
self.newz = cutV
else:
print('Error, unknown plottype. ',plottype)
return
self.plots[plottype] = dict(cut=self.cut, cutV=self.cutV, tol=self.tol,
nT=self.nT,nX=self.nX, nY=self.nY, nZ=self.nZ,
newt=self.newt,newx=self.newx, newy=self.newy, newz=self.newz)
self.setup_interpolating_grids()
# self.reinterpolate_values()
# for varname in self.variables:
# self.plots[plottype][varname]=self.variables[varname]['interpolator']
toc = time.perf_counter()
print(f"Time resetting plot and precomputing interpolations: {toc - tic:0.4f} seconds")
return
def setup_interpolating_grids(self):
'''setup the grid to interpolate to, trim to necessary size of source grid, and compute interpolation weights'''
tt, xx, yy, zz = np.meshgrid(self.newt,self.newx,self.newy,self.newz, indexing = 'xy')
self.newgrid = np.ndarray(shape=(np.size(np.reshape(xx,-1)),4), dtype=np.float32)
self.newgrid[:,0] = np.reshape(tt,-1)
self.newgrid[:,1] = np.reshape(xx,-1)
self.newgrid[:,2] = np.reshape(yy,-1)
self.newgrid[:,3] = np.reshape(zz,-1)
print("newgrid shape: ",self.newgrid.shape)
def get_plot(self, var, colorscale="Viridis",style="linear"):
'''
Return a plotly figure object for the available plot types set in set_plot()..
colorscale = Viridis [default], Cividis, BlueRed or Rainbow
'''
#Set some text strings
txtbot="Model: TIEGCM, Run: " + str(self.runname) + ", " + str(self.gridSize) + " cells, minimum dx=" + str(self.gridMinDx)
if style == "linear":
txtbar=var + " [" + self.variables[var]['units'] + "]"
if style == "log":
txtbar="log("+var + ") [" + self.variables[var]['units'] + "]"
print("before interpolation")
# Get values from interpolation already computed
tic = time.perf_counter()
if (self.newgrid[:,1]).max() < 50: # IP or height is column 1 (CTIPe_singletime: 0)
# result=self[var](self.newgrid[:,0].ravel(),self.newgrid[:,1].ravel(),self.newgrid[:,2].ravel(),self.newgrid[:,3].ravel())
result=self[var](self.newt,self.newx,self.newy,self.newz)
else:
self.varname=var
# once we have a gridify'd version we can use this
# result=vert_interp(self.newt,self.newx,self.newy,self.newz)
# have to use np.vectorize'd version
result=self.vert_interp(self,self.newgrid[:,0].ravel(),self.newgrid[:,1].ravel(),self.newgrid[:,2].ravel(),self.newgrid[:,3].ravel())
toc = time.perf_counter()
print(f"Time for computing interpolations: {toc - tic:0.4f} seconds")
print("result.shape after interpolation: ",result.shape)
if self.plottype == "LonLat" | |
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous embedded object"))
qn("annotation", key="a",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next annotation"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next annotation"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous annotation"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous annotation"))
qn("error", key="w",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next error"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next error"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous error"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous error"))
qn(
"article", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next article"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next article"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous article"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous article")
)
qn(
"grouping", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next grouping"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next grouping"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous grouping"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous grouping")
)
del qn
class ElementsListDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "ElementsList"
ELEMENT_TYPES = (
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("link", _("Lin&ks")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("heading", _("&Headings")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("formField", _("&Form fields")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("button", _("&Buttons")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("landmark", _("Lan&dmarks")),
)
Element = collections.namedtuple("Element", ("item", "parent"))
lastSelectedElementType=0
def __init__(self, document):
super().__init__(
parent=gui.mainFrame,
# Translators: The title of the browse mode Elements List dialog.
title=_("Elements List")
)
self.document = document
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentsSizer = wx.BoxSizer(wx.VERTICAL)
# Translators: The label of a group of radio buttons to select the type of element
# in the browse mode Elements List dialog.
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
child.SetSelection(self.lastSelectedElementType)
child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
contentsSizer.Add(child, flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
self.tree = wx.TreeCtrl(
self,
size=self.scaleSize((500, 300)), # height is chosen to ensure the dialog will fit on an 800x600 screen
style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS
)
self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
self.treeRoot = self.tree.AddRoot("root")
contentsSizer.Add(self.tree,flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
# Translators: The label of an editable text field to filter the elements
# in the browse mode Elements List dialog.
filterText = _("Filter b&y:")
labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl)
self.filterEdit = labeledCtrl.control
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
contentsSizer.Add(labeledCtrl.sizer)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to activate an element in the browse mode Elements List dialog.
# Beware not to set an accelerator that would collide with other controls in this dialog, such as an
# element type radio label.
self.activateButton = bHelper.addButton(self, label=_("Activate"))
self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
# Translators: The label of a button to move to an element
# in the browse mode Elements List dialog.
self.moveButton = bHelper.addButton(self, label=_("&Move to"))
self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
bHelper.addButton(self, id=wx.ID_CANCEL)
contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT)
mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.tree.SetFocus()
self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
self.CentreOnScreen()
def onElementTypeChange(self, evt):
elementType=evt.GetInt()
# We need to make sure this gets executed after the focus event.
# Otherwise, NVDA doesn't seem to get the event.
queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
self.lastSelectedElementType=elementType
def initElementType(self, elType):
if elType in ("link","button"):
# Links and buttons can be activated.
self.activateButton.Enable()
self.SetAffirmativeId(self.activateButton.GetId())
else:
# No other element type can be activated.
self.activateButton.Disable()
self.SetAffirmativeId(self.moveButton.GetId())
# Gather the elements of this type.
self._elements = []
self._initialElement = None
parentElements = []
isAfterSelection=False
for item in self.document._iterNodesByType(elType):
# Find the parent element, if any.
for parent in reversed(parentElements):
if item.isChild(parent.item):
break
else:
# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
parentElements.pop()
else:
# No parent found, so we're at the root.
# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
parent = None
element=self.Element(item,parent)
self._elements.append(element)
if not isAfterSelection:
isAfterSelection=item.isAfterSelection
if not isAfterSelection:
# The element immediately preceding or overlapping the caret should be the initially selected element.
# Since we have not yet passed the selection, use this as the initial element.
try:
self._initialElement = self._elements[-1]
except IndexError:
# No previous element.
pass
# This could be the parent of a subsequent element, so add it to the parents stack.
parentElements.append(element)
# Start with no filtering.
self.filterEdit.ChangeValue("")
self.filter("", newElementType=True)
def filter(self, filterText, newElementType=False):
# If this is a new element type, use the element nearest the cursor.
# Otherwise, use the currently selected element.
# #8753: wxPython 4 returns "invalid tree item" when the tree view is empty, so use initial element if appropriate.
try:
defaultElement = self._initialElement if newElementType else self.tree.GetItemData(self.tree.GetSelection())
except:
defaultElement = self._initialElement
# Clear the tree.
self.tree.DeleteChildren(self.treeRoot)
# Populate the tree with elements matching the filter text.
elementsToTreeItems = {}
defaultItem = None
matched = False
#Do case-insensitive matching by lowering both filterText and each element's text.
filterText=filterText.lower()
for element in self._elements:
label=element.item.label
if filterText and filterText not in label.lower():
continue
matched = True
parent = element.parent
if parent:
parent = elementsToTreeItems.get(parent)
item = self.tree.AppendItem(parent or self.treeRoot, label)
self.tree.SetItemData(item, element)
elementsToTreeItems[element] = item
if element == defaultElement:
defaultItem = item
self.tree.ExpandAll()
if not matched:
# No items, so disable the buttons.
self.activateButton.Disable()
self.moveButton.Disable()
return
# If there's no default item, use the first item in the tree.
self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
# Enable the button(s).
# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
if self.AffirmativeId == self.activateButton.Id:
self.activateButton.Enable()
self.moveButton.Enable()
def onTreeSetFocus(self, evt):
# Start with no search.
self._searchText = ""
self._searchCallLater = None
evt.Skip()
def onTreeChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the current default button.
evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
button = self.FindWindowById(self.AffirmativeId)
if button.Enabled:
button.ProcessEvent(evt)
else:
wx.Bell()
elif key == wx.WXK_F2:
item=self.tree.GetSelection()
if item:
selectedItemType=self.tree.GetItemData(item).item
self.tree.EditLabel(item)
evt.Skip()
elif key >= wx.WXK_START or key == wx.WXK_BACK:
# Non-printable character.
self._searchText = ""
evt.Skip()
else:
# Search the list.
# We have to implement this ourselves, as tree views don't accept space as a search character.
char = chr(evt.UnicodeKey).lower()
# If the same character is typed twice, do the same search.
if self._searchText != char:
self._searchText += char
if self._searchCallLater:
self._searchCallLater.Restart()
else:
self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
self.search(self._searchText)
def onTreeLabelEditBegin(self,evt):
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
if not selectedItemType.isRenameAllowed:
evt.Veto()
def onTreeLabelEditEnd(self,evt):
selectedItemNewName=evt.GetLabel()
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
selectedItemType.rename(selectedItemNewName)
def _clearSearchText(self):
self._searchText = ""
def search(self, searchText):
item = self.tree.GetSelection()
if not item:
# No items.
return
# First try searching from the current item.
# Failing that, search from the first item.
items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
if len(searchText) == 1:
# If only a single character has been entered, skip (search after) the current item.
next(items)
for item in items:
if self.tree.GetItemText(item).lower().startswith(searchText):
self.tree.SelectItem(item)
return
# Not found.
wx.Bell()
def _iterReachableTreeItemsFromItem(self, item):
while item:
yield item
childItem = self.tree.GetFirstChild(item)[0]
if childItem and self.tree.IsExpanded(item):
# Has children and is reachable, so recurse.
for childItem in self._iterReachableTreeItemsFromItem(childItem):
yield childItem
item = self.tree.GetNextSibling(item)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.GetValue())
evt.Skip()
def onAction(self, activate):
prevFocus = gui.mainFrame.prevFocus
self.Close()
# Save off the last selected element type onto the class so it's used in initialization next time.
self.__class__.lastSelectedElementType=self.lastSelectedElementType
item = self.tree.GetSelection()
item = self.tree.GetItemData(item).item
if activate:
item.activate()
else:
def move():
speech.cancelSpeech()
# Avoid double announce if item.obj is about to gain focus.
if not (
self.document.passThrough
and getattr(item, "obj", False)
and item.obj != prevFocus
and controlTypes.State.FOCUSABLE in item.obj.states
):
# #8831: Report before moving because moving might change the focus, which
# might mutate the document, potentially invalidating info if it is
# offset-based.
item.report()
item.moveTo()
# We must use core.callLater rather than wx.CallLater to ensure that the callback runs within NVDA's core pump.
# If it didn't, and it directly or indirectly called wx.Yield, it could start executing NVDA's core pump from within the yield, causing recursion.
core.callLater(100, move)
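# Illustrative sketch (not part of the dialog above; names are hypothetical): the
# type-ahead search in onTreeChar/search accumulates typed characters, looks for a
# label starting with that text from the item after the current one, and wraps
# around to the top; if nothing matches, the caller beeps (wx.Bell()).
def _wraparound_prefix_search(labels, current_index, search_text):
    order = list(range(current_index + 1, len(labels))) + list(range(0, current_index + 1))
    for i in order:
        if labels[i].lower().startswith(search_text.lower()):
            return i
    return None  # no match; the dialog would call wx.Bell() here

# Example: _wraparound_prefix_search(["button", "link", "list"], 0, "li") -> 1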
class BrowseModeDocumentTextInfo(textInfos.TextInfo):
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return item.obj
class BrowseModeDocumentTreeInterceptor(documentBase.DocumentWithTableNavigation,cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):
programmaticScrollMayFireEvent = False
def __init__(self,obj):
super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
self._lastProgrammaticScrollTime = None
self.documentConstantIdentifier = self.documentConstantIdentifier
self._lastFocusObj = None
self._objPendingFocusBeforeActivate = None
self._hadFirstGainFocus = False
self._enteringFromOutside = True
# We need to cache this because it will be unavailable once the document dies.
if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
self._lastCaretPosition = None
#: True if the last caret move was due to a focus change.
self._lastCaretMoveWasFocus = False
def terminate(self):
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module may already be gone; nothing to remember.
pass
#!/usr/bin/env python3
"""
Compute the embeddings for every task and store to disk.
Since many tasks might be too large to store in GPU memory (or even
CPU memory), and because Wavenet-like models will be expensive at
inference time, we cache all embeddings to disk.
One benefit of this approach is that since all embeddings are cached
as numpy arrays, the final training code can be pytorch-only,
regardless of whether the embedding model is tensorflow based.
TODO:
* Ideally, we would run this within a docker container, for
security. https://github.com/neuralaudio/hear2021-eval-kit/issues/51
* Profiling should occur here (both embedding time AFTER loading
to GPU, and complete wall time include disk writes).
* This is currently pytorch only.
https://github.com/neuralaudio/hear2021-eval-kit/issues/52
Using the included get_audio_embedding_numpy, we could instead
have this work both for pytorch and tensorflow.
https://github.com/neuralaudio/hear2021-eval-kit/issues/49
"""
import json
import os.path
import pickle
import random
import shutil
from importlib import import_module
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import soundfile as sf
import tensorflow as tf
import torch
from intervaltree import IntervalTree
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
# import wandb
import heareval.gpu_max_mem as gpu_max_mem
TORCH = "torch"
TENSORFLOW = "tf"
class Embedding:
"""
A wrapper class to help with loading embedding models and computing embeddings
using the HEAR 2021 API for both torch and tensorflow models.
# TODO: Still need to implement and test this with TensorFlow
Args:
module_name: the import name for the embedding module
model_path: location to load the model from
"""
def __init__(
self,
module_name: str,
model_path: str = None,
model_options: Optional[Dict[str, Any]] = None,
):
print(f"Importing {module_name}")
self.module = import_module(module_name)
if model_options is None:
model_options = {}
# Load the model using the model weights path if they were provided
if model_path is not None:
print(f"Loading model using: {model_path}")
self.model = self.module.load_model(model_path, **model_options) # type: ignore
else:
self.model = self.module.load_model(**model_options) # type: ignore
# Check to see what type of model this is: torch or tensorflow
if isinstance(self.model, torch.nn.Module):
self.type = TORCH
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model.to(self.device)
elif isinstance(self.model, tf.Module):
self.type = TENSORFLOW
# Tensorflow automatically manages data transfers to device,
# so we don't need to set self.device
else:
raise TypeError(f"Unsupported model type received: {type(self.model)}")
@property
def name(self):
# TODO: would be nice to include a version in this string.
# Potentially can set a version from the command line too to help with testing
# the same model but with difference versions of the weights.
return self.module.__name__
@property
def sample_rate(self):
return self.model.sample_rate
def as_tensor(self, x: Union[np.ndarray, torch.Tensor]):
if self.type == TORCH:
# Load array as tensor onto device
if isinstance(x, np.ndarray):
x = torch.tensor(x, device=self.device)
elif isinstance(x, torch.Tensor):
x = x.to(self.device)
else:
raise TypeError(
"Input must be one of np.ndarray or torch.Tensor for"
f"torch audio embedding models. "
f"Received: {type(x)}"
)
elif self.type == TENSORFLOW:
# Load array as tensor onto device
if not isinstance(x, np.ndarray):
x = x.numpy()
x = tf.convert_to_tensor(x)
else:
raise AssertionError("Unknown type")
return x
def get_scene_embedding_as_numpy(
self, audio: Union[np.ndarray, torch.Tensor]
) -> np.ndarray:
audio = self.as_tensor(audio)
if self.type == TORCH:
with torch.no_grad():
embeddings = self.module.get_scene_embeddings( # type: ignore
audio, self.model
)
return embeddings.detach().cpu().numpy()
elif self.type == TENSORFLOW:
embeddings = self.module.get_scene_embeddings( # type: ignore
audio, self.model
)
return embeddings.numpy()
else:
raise NotImplementedError("Unknown type")
def get_timestamp_embedding_as_numpy(
self, audio: Union[np.ndarray, torch.Tensor]
) -> Tuple[np.ndarray, np.ndarray]:
audio = self.as_tensor(audio)
if self.type == TORCH:
with torch.no_grad():
# flake8: noqa
embeddings, timestamps = self.module.get_timestamp_embeddings( # type: ignore
audio,
self.model,
)
gpu_max_mem.measure()
embeddings = embeddings.detach().cpu().numpy()
timestamps = timestamps.detach().cpu().numpy()
return embeddings, timestamps
elif self.type == TENSORFLOW:
# flake8: noqa
embeddings, timestamps = self.module.get_timestamp_embeddings( # type: ignore
audio,
self.model,
)
gpu_max_mem.measure()
embeddings = embeddings.numpy()
timestamps = timestamps.numpy()
return embeddings, timestamps
else:
raise NotImplementedError("Unknown type")
class AudioFileDataset(Dataset):
"""
Read in a JSON file and return audio and audio filenames
"""
def __init__(self, data: Dict, audio_dir: Path, sample_rate: int):
self.filenames = list(data.keys())
self.audio_dir = audio_dir
assert self.audio_dir.is_dir(), f"{audio_dir} is not a directory"
self.sample_rate = sample_rate
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
# Load in audio here in the Dataset. When the batch size is larger than
# 1 then the torch dataloader can take advantage of multiprocessing.
audio_path = self.audio_dir.joinpath(self.filenames[idx])
audio, sr = sf.read(str(audio_path), dtype=np.float32)
assert sr == self.sample_rate
return audio, self.filenames[idx]
def get_dataloader_for_embedding(
data: Dict, audio_dir: Path, embedding: Embedding, batch_size: int = 64
):
if embedding.type == TORCH or embedding.type == TENSORFLOW:
return DataLoader(
AudioFileDataset(data, audio_dir, embedding.sample_rate),
batch_size=batch_size,
shuffle=False,
)
else:
raise AssertionError("Unknown embedding type")
def save_scene_embedding_and_labels(
embeddings: np.ndarray, labels: List[Dict], filenames: Tuple[str], outdir: Path
):
assert len(embeddings) == len(filenames)
assert len(labels) == len(filenames)
for i, filename in enumerate(filenames):
out_file = outdir.joinpath(f"{filename}")
np.save(f"{out_file}.embedding.npy", embeddings[i])
json.dump(labels[i], open(f"{out_file}.target-labels.json", "w"))
def save_timestamp_embedding_and_labels(
embeddings: np.ndarray,
timestamps: np.ndarray,
labels: np.ndarray,
filename: Tuple[str],
outdir: Path,
):
for i, file in enumerate(filename):
out_file = outdir.joinpath(f"{file}")
np.save(f"{out_file}.embedding.npy", embeddings[i])
assert len(timestamps[i].shape) == 1
json.dump(timestamps[i].tolist(), open(f"{out_file}.timestamps.json", "w"))
json.dump(labels[i], open(f"{out_file}.target-labels.json", "w"), indent=4)
def get_labels_for_timestamps(labels: List, timestamps: np.ndarray) -> List:
# -> List[List[List[str]]]:
# -> List[List[str]]:
# TODO: Is this function redundant?
# A list of labels present at each timestamp
timestamp_labels = []
# NOTE: Make sure dataset events are specified in ms.
assert len(labels) == len(timestamps)
for i, label in enumerate(labels):
tree = IntervalTree()
# Add all events to the label tree
for event in label:
# We add 0.0001 so that the end also includes the event
tree.addi(event["start"], event["end"] + 0.0001, event["label"])
labels_for_sound = []
# Update the binary vector of labels with intervals for each timestamp
for j, t in enumerate(timestamps[i]):
interval_labels: List[str] = [interval.data for interval in tree[t]]
labels_for_sound.append(interval_labels)
# If we want to store the timestamp too
# labels_for_sound.append([float(t), interval_labels])
timestamp_labels.append(labels_for_sound)
assert len(timestamp_labels) == len(timestamps)
return timestamp_labels
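# Tiny worked example of the IntervalTree lookup used above: tree[t] returns every
# interval containing t, so a timestamp falling inside an event picks up that
# event's label. The numbers are made up for illustration (milliseconds).
def _example_interval_labels():
    tree = IntervalTree()
    tree.addi(0.0, 500.0001, "dog")      # event from 0 ms to 500 ms
    tree.addi(250.0, 750.0001, "bark")   # overlapping event
    labels_at_300 = sorted(interval.data for interval in tree[300.0])
    return labels_at_300  # -> ['bark', 'dog']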
def memmap_embeddings(
outdir: Path,
prng: random.Random,
metadata: Dict,
split_name: str,
embed_task_dir: Path,
):
"""
Memmap all the embeddings to one file, and pickle all the labels.
(We assume labels can fit in memory.)
TODO: This writes everything to disk twice; we could clean that up afterwards.
We might also be able to get away with writing to disk only once.
"""
embedding_files = list(outdir.glob("*.embedding.npy"))
prng.shuffle(embedding_files)
# First count the number of embeddings total
nembeddings = 0
ndim: int
for embedding_file in tqdm(embedding_files):
emb = np.load(embedding_file).astype(np.float32)
if metadata["embedding_type"] == "scene":
assert emb.ndim == 1
nembeddings += 1
ndim = emb.shape[0]
assert emb.dtype == np.float32
elif metadata["embedding_type"] == "event":
assert emb.ndim == 2
nembeddings += emb.shape[0]
ndim = emb.shape[1]
assert emb.dtype == np.float32
else:
raise ValueError(f"Unknown embedding type: {metadata['embedding_type']}")
open(
embed_task_dir.joinpath(f"{split_name}.embedding-dimensions.json"), "wt"
).write(json.dumps((nembeddings, ndim)))
embedding_memmap = np.memmap(
filename=embed_task_dir.joinpath(f"{split_name}.embeddings.npy"),
dtype=np.float32,
mode="w+",
shape=(nembeddings, ndim),
)
idx = 0
labels = []
filename_timestamps = []
for embedding_file in tqdm(embedding_files):
emb = np.load(embedding_file)
lbl = json.load(
open(str(embedding_file).replace("embedding.npy", "target-labels.json"))
)
if metadata["embedding_type"] == "scene":
assert emb.ndim == 1
embedding_memmap[idx] = emb
# lbl will be a list of labels, make sure that it has exactly one label
# for multiclass problems. Will be a list of zero or more for multilabel.
if metadata["prediction_type"] == "multiclass":
assert len(lbl) == 1
elif metadata["prediction_type"] == "multilabel":
assert isinstance(lbl, list)
else:
raise NotImplementedError(
"Only multiclass and multilabel prediction types are "
f"implemented for scene embeddings. Received {metadata['prediction_type']}"
)
labels.append(lbl)
idx += 1
elif metadata["embedding_type"] == "event":
assert emb.ndim == 2
embedding_memmap[idx : idx + emb.shape[0]] = emb
assert emb.shape[0] == len(lbl)
labels += lbl
timestamps = json.load(
open(str(embedding_file).replace("embedding.npy", "timestamps.json"))
)
slug = str(embedding_file).replace(".embedding.npy", "")
filename_timestamps += [(slug, timestamp) for timestamp in timestamps]
assert emb.shape[0] == len(
timestamps
), f"{emb.shape[0]} != {len(timestamps)}"
assert len(lbl) == len(timestamps), f"{len(lbl)} != {len(timestamps)}"
idx += emb.shape[0]
else:
raise ValueError(f"Unknown embedding type: {metadata['embedding_type']}")
# Write changes to disk
embedding_memmap.flush()
# TODO: Convert labels to indices?
pickle.dump(
labels,
open(
embed_task_dir.joinpath(f"{split_name}.target-labels.pkl"),
"wb",
),
)
if metadata["embedding_type"] == "event":
assert len(labels) == len(filename_timestamps)
open(
embed_task_dir.joinpath(f"{split_name}.filename-timestamps.json"),
"wt",
).write(json.dumps(filename_timestamps, indent=4))
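# Sketch of reading the memmapped embeddings back for downstream training, using
# the dimensions JSON written above to recover the array shape. File names mirror
# those used in memmap_embeddings; the split name is an assumption.
def _example_load_memmap(embed_task_dir: Path, split_name: str = "train"):
    nembeddings, ndim = json.loads(
        embed_task_dir.joinpath(f"{split_name}.embedding-dimensions.json").read_text()
    )
    embeddings = np.memmap(
        filename=embed_task_dir.joinpath(f"{split_name}.embeddings.npy"),
        dtype=np.float32,
        mode="r",
        shape=(nembeddings, ndim),
    )
    labels = pickle.load(
        open(embed_task_dir.joinpath(f"{split_name}.target-labels.pkl"), "rb")
    )
    assert len(labels) == nembeddings
    return embeddings, labels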
def task_embeddings(
embedding: Embedding,
task_path: Path,
embed_task_dir: Path,
):
prng = random.Random()
prng.seed(0)
metadata_path = task_path.joinpath("task_metadata.json")
metadata = json.load(metadata_path.open())
label_vocab_path = task_path.joinpath("labelvocabulary.csv")
# wandb.init(project="heareval", tags=["embedding", task_name])
# Copy these two files to the embeddings directory,
# so we have everything we need in embeddings for doing downstream
# prediction and evaluation.
<filename>resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Pmv/hostappInterface/comput_util.py
## Automatically adapted for numpy.oldnumeric Apr 10, 2008 by
##
## Copyright (C) The Scripps Research Institute 2006
##
## Authors: <NAME> <<EMAIL>>
##
## $Header: /opt/cvs/python/packages/share1.5/Pmv/hostappInterface/comput_util.py,v 1.22 2010/11/30 07:23:01 autin Exp $
## $Id: comput_util.py,v 1.22 2010/11/30 07:23:01 autin Exp $
##
##
## utils function use for computation during pattern detection
import numpy.oldnumeric as Numeric
from math import sin, cos  # needed by rotatePoint below
from MolKit.pdbWriter import PdbWriter
#from MolKit.chargeCalculator import KollmanChargeCalculator,GasteigerChargeCalculator
from PyAutoDock.MolecularSystem import MolecularSystem
from PyAutoDock.AutoDockScorer import AutoDock305Scorer, AutoDock4Scorer
#from PyAutoDock.AutoDockScorer import AutoDockTermWeights305, AutoDockTermWeights4
#from PyAutoDock.trilinterp_scorer import TrilinterpScorer,TrilinterpScorer_AD3
from PyAutoDock.scorer import WeightedMultiTerm
from PyAutoDock.electrostatics import Electrostatics
from PyAutoDock.vanDerWaals import VanDerWaals,HydrogenBonding
#from PyAutoDock.vanDerWaals import HydrogenBonding
from PyAutoDock.desolvation import Desolvation
#import warnings
from MolKit.molecule import Atom
#######################MATH FUNCTION##########################################################
def transformedCoordinatesWithMatrice(mol,matrice):
""" for a nodeset, this function returns transformed coordinates.
This function will use the pickedInstance attribute if found.
@type mol: MolKit node
@param mol: the molecule to be transformed
@type matrice: 4x4array
@param matrice: the matrix to apply to the molecule node
@rtype: array
@return: the transformed list of 3d points from the molecule atom coordinates
"""
vt = []
#transfo = matrice#Numeric.transpose(Numeric.reshape(pat.mat_transfo,(4,4)))
scaleFactor = 1.#pat.scaleFactor
#for node in nodes:
#find all atoms and their coordinates
coords = mol.allAtoms.coords# nodes.findType(Atom).coords
#g = nodes[0].top.geomContainer.geoms['master']
# M1 = g.GetMatrix(g.LastParentBeforeRoot())
# apply the AR transfo matrix
M = matrice#Numeric.dot(transfo,M1)
for pt in coords:
ptx = (M[0][0]*pt[0]+M[0][1]*pt[1]+M[0][2]*pt[2]+M[0][3]) /scaleFactor
pty = (M[1][0]*pt[0]+M[1][1]*pt[1]+M[1][2]*pt[2]+M[1][3]) /scaleFactor
ptz = (M[2][0]*pt[0]+M[2][1]*pt[1]+M[2][2]*pt[2]+M[2][3]) /scaleFactor
vt.append( (ptx, pty, ptz) )
return vt
def rotatePoint(pt,m,ax):
x=pt[0]
y=pt[1]
z=pt[2]
u=ax[0]
v=ax[1]
w=ax[2]
ux=u*x
uy=u*y
uz=u*z
vx=v*x
vy=v*y
vz=v*z
wx=w*x
wy=w*y
wz=w*z
sa=sin(ax[3])
ca=cos(ax[3])
pt[0]=(u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa)+ m[0]
pt[1]=(v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa)+ m[1]
pt[2]=(w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa)+ m[2]
return pt
def matrixToEuler(mat):
"""
code from 'http://www.euclideanspace.com/maths/geometry/rotations/conversions/'
notes : this conversion uses conventions as described on page:
'http://www.euclideanspace.com/maths/geometry/rotations/euler/index.htm'
Coordinate System: right hand
Positive angle: right hand
Order of euler angles: heading first, then attitude, then bank
matrix row column ordering:
[m00 m01 m02]
[m10 m11 m12]
[m20 m21 m22]
@type mat: 4x4array
@param mat: the matrix to convert to euler angles (heading, attitude, bank)
@rtype: 3d array
@return: the euler angles computed from the matrix
"""
#Assuming the angles are in radians.
#3,3 matrix m[0:3,0:3]
#return heading,attitude,bank Y,Z,X
import math
if (mat[1][0] > 0.998) : # singularity at north pole
heading = math.atan2(mat[0][2],mat[2][2])
attitude = math.pi/2.
bank = 0
return (heading,attitude,bank)
if (mat[1][0] < -0.998) : # singularity at south pole
heading = math.atan2(mat[0][2],mat[2][2])
attitude = -math.pi/2.
bank = 0
return (heading,attitude,bank)
heading = math.atan2(-mat[2][0],mat[0][0])
bank = math.atan2(-mat[1][2],mat[1][1])
attitude = math.asin(mat[1][0])
if mat[0][0] < 0 :
if (attitude < 0.) and (math.degrees(attitude) > -90.):
attitude = -math.pi-attitude
elif (attitude > 0.) and (math.degrees(attitude) < 90.):
attitude = math.pi-attitude
return (heading,attitude,bank)
def eulerToMatrix(euler): #double heading, double attitude, double bank
"""
code from 'http://www.euclideanspace.com/maths/geometry/rotations/conversions/'.
this conversion uses NASA standard aeroplane conventions as described on page:
'http://www.euclideanspace.com/maths/geometry/rotations/euler/index.htm'
Coordinate System: right hand
Positive angle: right hand
Order of euler angles: heading first, then attitude, then bank
matrix row column ordering:
[m00 m01 m02]
[m10 m11 m12]
[m20 m21 m22]
@type euler: 3d array
@param euler: the euler angles to convert to a matrix
@rtype: 4x4array
@return: the matrix computed from the euler angles
"""
# Assuming the angles are in radians.
import math
heading=euler[0]
attitude=euler[1]
bank=euler[2]
m=[[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]]
ch = math.cos(heading)
sh = math.sin(heading)
ca = math.cos(attitude)
sa = math.sin(attitude)
cb = math.cos(bank)
sb = math.sin(bank)
m[0][0] = ch * ca
m[0][1] = sh*sb - ch*sa*cb
m[0][2] = ch*sa*sb + sh*cb
m[1][0] = sa
m[1][1] = ca*cb
m[1][2] = -ca*sb
m[2][0] = -sh*ca
m[2][1] = sh*sa*cb + ch*sb
m[2][2] = -sh*sa*sb + ch*cb
return m
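# Quick round-trip sketch for the two helpers above: build a rotation matrix from
# euler angles and recover them with matrixToEuler. Angles are in radians and the
# values are arbitrary (non-singular, i.e. attitude well away from +/-90 degrees).
def _example_euler_roundtrip():
    angles = (0.3, 0.2, 0.1)        # heading, attitude, bank
    m = eulerToMatrix(angles)       # 4x4; matrixToEuler only reads the 3x3 part
    recovered = matrixToEuler(m)
    return all(abs(a - b) < 1e-6 for a, b in zip(angles, recovered))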
rotY90n = Numeric.array([[ 0., 0., 1., 0.],
[ 0., 1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.]],'f')
def ApplyMatrix(coords,mat):
"""
Apply the 4x4 transformation matrix to the given list of 3d points
@type coords: array
@param coords: the list of point to transform.
@type mat: 4x4array
@param mat: the matrix to apply to the 3d points
@rtype: array
@return: the transformed list of 3d points
"""
#4x4matrix"
coords = Numeric.array(coords)
one = Numeric.ones( (coords.shape[0], 1), coords.dtype.char )
c = Numeric.concatenate( (coords, one), 1 )
return Numeric.dot(c, Numeric.transpose(mat))[:, :3]
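# Minimal sketch of ApplyMatrix: with the translation stored in the last column
# of the 4x4 matrix, every point is shifted by (1, 2, 3).
def _example_apply_matrix():
    mat = [[1., 0., 0., 1.],
           [0., 1., 0., 2.],
           [0., 0., 1., 3.],
           [0., 0., 0., 1.]]
    pts = [(0., 0., 0.), (1., 1., 1.)]
    return ApplyMatrix(pts, mat)  # array equal to [[1., 2., 3.], [2., 3., 4.]]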
def Decompose4x4(matrix):
"""
Takes a matrix in shape (16,) in OpenGL form (sequential values go
down columns) and decomposes it into its rotation (shape (16,)),
translation (shape (3,)), and scale (shape (3,))
@type matrix: 4x4array
@param matrix: the matrix to decompose
@rtype: list of array
@return: the decomposition of the matrix ie : rotation,translation,scale
"""
m = matrix
transl = Numeric.array((m[12], m[13], m[14]), 'f')
scale0 = Numeric.sqrt(m[0]*m[0]+m[4]*m[4]+m[8]*m[8])
scale1 = Numeric.sqrt(m[1]*m[1]+m[5]*m[5]+m[9]*m[9])
scale2 = Numeric.sqrt(m[2]*m[2]+m[6]*m[6]+m[10]*m[10])
scale = Numeric.array((scale0,scale1,scale2)).astype('f')
mat = Numeric.reshape(m, (4,4))
rot = Numeric.identity(4).astype('f')
rot[:3,:3] = mat[:3,:3].astype('f')
rot[:,0] = (rot[:,0]/scale0).astype('f')
rot[:,1] = (rot[:,1]/scale1).astype('f')
rot[:,2] = (rot[:,2]/scale2).astype('f')
rot.shape = (16,)
#rot1 = rot.astype('f')
return rot, transl, scale
def rotatePoint(pt,m,ax):
"""
rotate a point (x,y,z) around an axis by an angle (ax[3], in radians).
@type pt: point
@param pt: the point to rotate
@type m: array
@param m: translation offset to apply after the rotation
@type ax: vector4D
@param ax: axis of rotation (ax[0:3]) and the angle of rotation (ax[3])
@rtype: point
@return: the new rotated point
"""
x=pt[0]
y=pt[1]
z=pt[2]
u=ax[0]
v=ax[1]
w=ax[2]
ux=u*x
uy=u*y
uz=u*z
vx=v*x
vy=v*y
vz=v*z
wx=w*x
wy=w*y
wz=w*z
sa=sin(ax[3])
ca=cos(ax[3])
pt[0]=(u*(ux+vy+wz)+(x*(v*v+w*w)-u*(vy+wz))*ca+(-wy+vz)*sa)+ m[0]
pt[1]=(v*(ux+vy+wz)+(y*(u*u+w*w)-v*(ux+wz))*ca+(wx-uz)*sa)+ m[1]
pt[2]=(w*(ux+vy+wz)+(z*(u*u+v*v)-w*(ux+vy))*ca+(-vx+uy)*sa)+ m[2]
return pt
def norm(A):
"""Return vector norm"""
return Numeric.sqrt(sum(A*A))
def dist(A,B):
"""Return distnce between point A and point B"""
return Numeric.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2+(A[2]-B[2])**2)
def normsq(A):
"""Return square of vector norm"""
return abs(sum(A*A))
def normalize(A):
"""Normalize the Vector A"""
if (norm(A)==0.0) : return A
else :return A/norm(A)
def getCenter(coords):
"""
Get the center of mass from a 3d array of x,y,z coordinates.
@type coords: liste/array
@param coords: the coordinates
@rtype: list/array
@return: the center of mass of the coordinates
"""
coords = Numeric.array(coords)#self.allAtoms.coords
center = sum(coords)/(len(coords)*1.0)
center = list(center)
for i in range(3):
center[i] = round(center[i], 4)
#print "center =", self.center
return center
def computeRadius(protein,center=None):
"""
Get the bounding radius of a protein, i.e. the largest atom distance from the center.
@type protein: MolKit Protein
@param protein: the molecule
@type center: list/array
@param center: the center of the molecule
@rtype: float
@return: the radius of the molecule
"""
if center == None : center = protein.getCenter()
rs = 0.
for atom in protein.allAtoms:
r = dist(center,atom._coords[0])
if r > rs:
rs = r
return rs
def convertColor(col,toint=True):
"""
This function will convert a color array [r,g,b] from the 0-255 range
to the 0.-1. range (and vice versa)
@type col: array
@param col: the color [r,g,b]
@type toint: boolean
@param toint: direction of the conversion; if True convert to 0-255,
if False convert to the 0.-1. range
@rtype: array
@return: the converted color [0-1.,0-1.,0-1.] or [0-255,0-255,0-255]
"""
if toint and max(col)<=1.0: col = map( lambda x: x*255, col)
elif not toint and max(col)>1.0: col = map( lambda x: x/255., col)
return col
DGatomIds=['ASPOD1','ASPOD2','GLUOE1','GLUOE2', 'SERHG',
'THRHG1','TYROH','TYRHH',
'LYSNZ','LYSHZ1','LYSHZ2','LYSHZ3','ARGNE','ARGNH1','ARGNH2',
'ARGHH11','ARGHH12','ARGHH21','ARGHH22','ARGHE','GLNHE21',
'GLNHE22','GLNHE2',
'ASNHD2','ASNHD21', 'ASNHD22','HISHD1','HISHE2' ,
'CYSHG', 'HN']
def lookupDGFunc(atom):
assert isinstance(atom, Atom)
if atom.name in ['HN']:
atom.atomId = atom.name
else:
atom.atomId=atom.parent.type+atom.name
if atom.atomId not in DGatomIds:
atom.atomId=atom.element
return atom.atomId.upper()
def norm(A):
"Return vector norm"
return Numeric.sqrt(sum(A*A))
def dist(A,B):
return Numeric.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2+(A[2]-B[2])**2)
def normsq(A):
"Return square of vector norm"
return abs(sum(A*A))
def normalize(A):
"Normalize the Vector"
if (norm(A)==0.0) : return A
else :return A/norm(A)
def changeR(txt):
from Pmv.pmvPalettes import RasmolAminoSortedKeys
from MolKit.protein import ResidueSetSelector
# problem: this residue is not in r_keyD
rname = txt[0:3]
rnum = txt[3:]
if rname not in RasmolAminoSortedKeys :#ResidueSetSelector.r_keyD.keys() :
print(rname)
rname=rname.replace(" ","")
if len(rname) == 1 :
return rname+rnum
return rname[1]+rnum
else :
rname=rname.replace(" ","")
r1n=ResidueSetSelector.r_keyD[rname]
return r1n+rnum
def patchRasmolAminoColor():
from Pmv.pmvPalettes import RasmolAmino,RasmolAminoSortedKeys
RasmolAminocorrected=RasmolAmino.copy()
for res in RasmolAminoSortedKeys:
name=res.strip()
if name in ['A', 'C', 'G', 'T', 'U']:
name = 'D'+name
RasmolAminocorrected[name]= RasmolAmino[res]
del RasmolAminocorrected[res]
return RasmolAminocorrected
#######################ENERGY CLASS & FUNCTION##########################################################
class EnergyHandler:
""" object to manage the different energies calculation between set of atoms """
def __init__(self,viewer):
self.viewer = viewer
# list the energies instance to manage
self.data = {} # keys name, values: energie classs instance
self.current_scorer = None
self.realTime = True
def add(self,atomset1,atomset2,score_type='c_ad3Score',**kw):
""" a pairs of atoms to get energies between them
should be receptor ligan
"""
# make name from | |
<filename>ea_transformer.py
import math
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from hilbert import decode, encode
from pyzorder import ZOrderIndexer
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg, overlay_external_default_cfg
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
# from dcn_v2_amp import DCN
from PIL import Image as DCN
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
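# Shape sketch for the Attention block above: with batch B, sequence length N and
# embedding dim C split across num_heads heads, qkv is reshaped to
# (3, B, heads, N, C // heads) and the attention map is (B, heads, N, N).
# The sizes below are arbitrary (e.g. 256 patches plus one cls token).
def _example_attention_shapes():
    attn = Attention(dim=192, num_heads=8)
    x = torch.zeros(2, 257, 192)
    out = attn(x)
    return out.shape  # torch.Size([2, 257, 192])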
class CrossAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.q = nn.Linear(dim, dim * 1, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, xq):
B, N, C = x.shape
_, Nq, _ = xq.shape
kv = self.kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q_ = self.q(xq).reshape(B, Nq, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = q_[0], kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, Nq, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class LocalAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., local_ks=3, length=196):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
mask = torch.ones(length, length)
for i in range(length):
for j in range(i-local_ks//2, i+local_ks//2+1, 1):
j = min(max(0, j), length-1)
mask[i, j] = 0
mask = mask.unsqueeze(0).unsqueeze(1)
self.mask = nn.Parameter(mask, requires_grad=False)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.masked_fill_(self.mask.bool(), -np.inf)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
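# The mask built in LocalAttention above is a banded matrix: position i may only
# attend to positions within local_ks // 2 of itself, everything else is filled
# with -inf before the softmax. For length=5, local_ks=3 the allowed (mask == 0)
# entries form the band
#   [0 0 . . .]
#   [0 0 0 . .]
#   [. 0 0 0 .]
#   [. . 0 0 0]
#   [. . . 0 0]
# The helper below just rebuilds that band for inspection.
def _example_local_mask(length=5, local_ks=3):
    mask = torch.ones(length, length)
    for i in range(length):
        for j in range(i - local_ks // 2, i + local_ks // 2 + 1):
            j = min(max(0, j), length - 1)
            mask[i, j] = 0
    return mask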
class LocalBranch(nn.Module):
def __init__(self, dim, local_type='conv', local_ks=3, length=196, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.local_type = local_type
if local_type == 'conv':
self.linear = nn.Linear(dim, dim)
self.local = nn.Conv1d(dim, dim, kernel_size=local_ks, padding=local_ks//2, padding_mode='zeros', groups=1)
elif local_type == 'dcn':
self.linear = nn.Linear(dim, dim)
self.local = DCN(dim, dim, kernel_size=(local_ks, 1), stride=1, padding=(local_ks//2, 0), deformable_groups=2)
elif local_type == 'attn':
self.local = LocalAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop,
local_ks=local_ks, length=length)
else:
self.local = nn.Identity()
def forward(self, x):
if self.local_type in ['conv']:
x = self.linear(x)
x = x.permute(0, 2, 1)
x = self.local(x)
x = x.permute(0, 2, 1)
return x
elif self.local_type == 'dcn':
x = self.linear(x)
x = x.permute(0, 2, 1).unsqueeze(3).contiguous()
x = self.local(x)
x = x.squeeze(3).permute(0, 2, 1)
return x
elif self.local_type == 'attn':
x = self.local(x)
return x
else:
x = self.local(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-6), local_type='conv', local_ks=3, length=196, local_ratio=0.5, ffn_type='base'):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
if ffn_type == 'base':
MLP = Mlp
else:
raise Exception('invalid ffn_type: {}'.format(ffn_type))
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Block_local(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6), local_type='conv', local_ks=3, length=196, local_ratio=0.5, ffn_type='base'):
super().__init__()
local_dim = int(dim * local_ratio)
self.global_dim = dim - local_dim
div = 2
self.num_heads = num_heads // div
self.norm1 = norm_layer(self.global_dim)
self.norm1_local = norm_layer(local_dim)
self.attn = Attention(self.global_dim, num_heads=self.num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.local = LocalBranch(local_dim, local_type=local_type, local_ks=local_ks, length=length,
num_heads=self.num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
if ffn_type == 'base':
MLP = Mlp
else:
raise Exception('invalid ffn_type: {}'.format(ffn_type))
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x): # torch.Size([64, 257, 192])
x_attn = self.drop_path(self.attn(self.norm1(x[:, :, :self.global_dim])))
x_local = self.drop_path(self.local(self.norm1_local(x[:, :, self.global_dim:])))
x = x + torch.cat([x_attn, x_local], dim=2)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Block_cls(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-6), local_type='conv', local_ks=3, local_ratio=0.5, ffn_type='base'):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
if ffn_type == 'base':
MLP = Mlp
else:
raise Exception('invalid ffn_type: {}'.format(ffn_type))
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, xq):
xq = xq + self.drop_path(self.attn(x, self.norm1(xq)))
xq = xq + self.drop_path(self.mlp(self.norm2(xq)))
return xq
class LocED(nn.Module):
def __init__(self, size=16, size_p=1, dim=2, loc_encoder='sis'):
super().__init__()
size = int(size)
if loc_encoder in ['zorder', 'hilbert']:
if size & (size - 1) != 0:
raise ValueError('invalid size \'{}\' for \'{}\' mode'.format(size, loc_encoder))
if loc_encoder in ['sis']:
if size_p == 1:
raise ValueError('invalid size \'{}\' for \'{}\' mode'.format(size_p, loc_encoder))
max_num = size ** dim
indexes = np.arange(max_num)
if 'sweep' == loc_encoder: # ['sweep', 'scan', 'zorder', 'hilbert', 'sis']
locs_flat = indexes
elif 'scan' == loc_encoder:
indexes = indexes.reshape(size, size)
for i in np.arange(1, size, step=2):
indexes[i, :] = indexes[i, :][::-1]
locs_flat = indexes.reshape(-1)
elif 'zorder' == loc_encoder:
zi = ZOrderIndexer((0, size - 1), (0, size - 1))
locs_flat = []
for z in indexes:
r, c = zi.rc(int(z))
locs_flat.append(c * size + r)
locs_flat = np.array(locs_flat)
elif 'hilbert' == loc_encoder:
bit = int(math.log2(size))
locs = decode(indexes, dim, bit)
locs_flat = self.flat_locs_hilbert(locs, dim, bit)
elif 'sis' == loc_encoder:
locs_flat = []
axis_patches = size // size_p
for i in range(axis_patches):
for j in range(axis_patches):
for ii in range(size_p):
for jj in range(size_p):
locs_flat.append((size_p * i + ii) * size + (size_p * j + jj))
locs_flat = np.array(locs_flat)
else:
raise Exception('invalid encoder mode')
locs_flat_inv = np.argsort(locs_flat)
index_flat = torch.LongTensor(locs_flat.astype(np.int64)).unsqueeze(0).unsqueeze(2)
index_flat_inv = torch.LongTensor(locs_flat_inv.astype(np.int64)).unsqueeze(0).unsqueeze(2)
self.index_flat = nn.Parameter(index_flat, requires_grad=False)
self.index_flat_inv = nn.Parameter(index_flat_inv, requires_grad=False)
def flat_locs_hilbert(self, locs, num_dim, num_bit):
ret = []
l = 2 ** num_bit
for i in range(len(locs)):
loc = locs[i]
loc_flat = 0
for j in range(num_dim):
loc_flat += loc[j] * (l ** j)
ret.append(loc_flat)
return np.array(ret).astype(np.uint64)
def __call__(self, img):
img_encode = self.encode(img)
return img_encode
def encode(self, img):
img_encode = torch.zeros(img.shape, dtype=img.dtype, device=img.device).scatter_(1, self.index_flat_inv.expand(img.shape), img)
return img_encode
def decode(self, img):
img_decode = torch.zeros(img.shape, dtype=img.dtype, device=img.device).scatter_(1, self.index_flat.expand(img.shape), img)
return img_decode
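# Illustrative sketch of LocED: it permutes a flattened (B, N, C) sequence into a
# space-filling-curve order and back. For a 4x4 grid in 'scan' (boustrophedon)
# mode, odd rows are reversed, so the flat order becomes
# 0 1 2 3 7 6 5 4 8 9 10 11 15 14 13 12; decode() restores the original order.
def _example_loced_scan():
    loced = LocED(size=4, size_p=2, dim=2, loc_encoder='scan')
    seq = torch.arange(16, dtype=torch.float32).reshape(1, 16, 1)  # (B, N, C)
    encoded = loced.encode(seq)
    decoded = loced.decode(encoded)
    return encoded.flatten().tolist(), torch.equal(decoded, seq)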
class EATransformer(nn.Module):
def __init__(self, img_size=256, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, depth_cls=2,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
pos_emb=True, cls_token=False, cls_token_head=True, loc_encoder='sis', block_type='base_local', local_type='conv',
local_ks=3, local_ratio=0.5, ffn_type='base', sfc_mode='first'):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.cls_token_ = cls_token
self.cls_token_head_ = cls_token_head
self.sfc_mode = sfc_mode
axis_patches = img_size // patch_size
num_patches = axis_patches ** 2
self.num_patches = num_patches
if sfc_mode == 'first':
self.loc_encoder = LocED(size=img_size, size_p=patch_size, dim=2, loc_encoder=loc_encoder)
self.patch_embed = nn.Conv1d(in_chans, embed_dim, kernel_size=patch_size ** 2, stride=patch_size ** 2)
elif sfc_mode == 'second':
self.loc_encoder
``day_of_month='1-7,15-21'`` (for the first and
third weeks of the month).
.. attribute:: month_of_year
- A (list of) integers from 1-12 that represents the months of
the year during which execution can occur.
- A string representing a Crontab pattern. This may get pretty
advanced, such as ``month_of_year='*/3'`` (for the first month
of every quarter) or ``month_of_year='2-12/2'`` (for every even
numbered month).
.. attribute:: nowfun
Function returning the current date and time
(:class:`~datetime.datetime`).
.. attribute:: app
The Celery app instance.
It's important to realize that any day on which execution should
occur must be represented by entries in all three of the day and
month attributes. For example, if ``day_of_week`` is 0 and
``day_of_month`` is every seventh day, only months that begin
on Sunday and are also in the ``month_of_year`` attribute will have
execution events. Or, if ``day_of_week`` is 1 and ``day_of_month``
is '1-7,15-21', execution occurs on the first and third Monday of
every month present in ``month_of_year``.
"""
def __init__(self, minute='*', hour='*', day_of_week='*',
day_of_month='*', month_of_year='*', **kwargs):
self._orig_minute = cronfield(minute)
self._orig_hour = cronfield(hour)
self._orig_day_of_week = cronfield(day_of_week)
self._orig_day_of_month = cronfield(day_of_month)
self._orig_month_of_year = cronfield(month_of_year)
self.hour = self._expand_cronspec(hour, 24)
self.minute = self._expand_cronspec(minute, 60)
self.day_of_week = self._expand_cronspec(day_of_week, 7)
self.day_of_month = self._expand_cronspec(day_of_month, 31, 1)
self.month_of_year = self._expand_cronspec(month_of_year, 12, 1)
super(crontab, self).__init__(**kwargs)
@staticmethod
def _expand_cronspec(cronspec, max_, min_=0):
"""Expand cron specification.
Takes the given cronspec argument in one of the forms:
.. code-block:: text
int (like 7)
str (like '3-5,*/15', '*', or 'monday')
set (like {0,15,30,45})
list (like [8-17])
And convert it to an (expanded) set representing all time unit
values on which the Crontab triggers. Only in case of the base
type being :class:`str`, parsing occurs. (It's fast and
happens only once for each Crontab instance, so there's no
significant performance overhead involved.)
For the other base types, merely Python type conversions happen.
The argument ``max_`` is needed to determine the expansion of
``*`` and ranges. The argument ``min_`` is needed to determine
the expansion of ``*`` and ranges for 1-based cronspecs, such as
day of month or month of year. The default is sufficient for minute,
hour, and day of week.
"""
if isinstance(cronspec, numbers.Integral):
result = {cronspec}
elif isinstance(cronspec, string_t):
result = crontab_parser(max_, min_).parse(cronspec)
elif isinstance(cronspec, set):
result = cronspec
elif isinstance(cronspec, Iterable):
result = set(cronspec)
else:
raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec)))
# ensure the result does not precede the min or exceed the max
for number in result:
if number >= max_ + min_ or number < min_:
raise ValueError(CRON_PATTERN_INVALID.format(
min=min_, max=max_ - 1 + min_, value=number))
return result
def _delta_to_next(self, last_run_at, next_hour, next_minute):
"""Find next delta.
Takes a :class:`~datetime.datetime` of last run, next minute and hour,
and returns a :class:`~celery.utils.time.ffwd` for the next
scheduled day and time.
Only called when ``day_of_month`` and/or ``month_of_year``
cronspec is specified to further limit scheduled task execution.
"""
datedata = AttributeDict(year=last_run_at.year)
days_of_month = sorted(self.day_of_month)
months_of_year = sorted(self.month_of_year)
def day_out_of_range(year, month, day):
try:
datetime(year=year, month=month, day=day)
except ValueError:
return True
return False
def roll_over():
for _ in range(2000):
flag = (datedata.dom == len(days_of_month) or
day_out_of_range(datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom]) or
(self.maybe_make_aware(datetime(datedata.year,
months_of_year[datedata.moy],
days_of_month[datedata.dom])) < last_run_at))
if flag:
datedata.dom = 0
datedata.moy += 1
if datedata.moy == len(months_of_year):
datedata.moy = 0
datedata.year += 1
else:
break
else:
# Tried 2000 times, we're most likely in an infinite loop
raise RuntimeError('unable to rollover, '
'time specification is probably invalid')
if last_run_at.month in self.month_of_year:
datedata.dom = bisect(days_of_month, last_run_at.day)
datedata.moy = bisect_left(months_of_year, last_run_at.month)
else:
datedata.dom = 0
datedata.moy = bisect(months_of_year, last_run_at.month)
if datedata.moy == len(months_of_year):
datedata.moy = 0
roll_over()
while 1:
th = datetime(year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom])
if th.isoweekday() % 7 in self.day_of_week:
break
datedata.dom += 1
roll_over()
return ffwd(year=datedata.year,
month=months_of_year[datedata.moy],
day=days_of_month[datedata.dom],
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0)
def __repr__(self):
return CRON_REPR.format(self)
def __reduce__(self):
return (self.__class__, (self._orig_minute,
self._orig_hour,
self._orig_day_of_week,
self._orig_day_of_month,
self._orig_month_of_year), None)
def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd):
# pylint: disable=redefined-outer-name
# caching global ffwd
tz = tz or self.tz
last_run_at = self.maybe_make_aware(last_run_at)
now = self.maybe_make_aware(self.now())
dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7
execute_this_date = (
last_run_at.month in self.month_of_year and
last_run_at.day in self.day_of_month and
dow_num in self.day_of_week
)
execute_this_hour = (
execute_this_date and
last_run_at.day == now.day and
last_run_at.month == now.month and
last_run_at.year == now.year and
last_run_at.hour in self.hour and
last_run_at.minute < max(self.minute)
)
if execute_this_hour:
next_minute = min(minute for minute in self.minute
if minute > last_run_at.minute)
delta = ffwd(minute=next_minute, second=0, microsecond=0)
else:
next_minute = min(self.minute)
execute_today = (execute_this_date and
last_run_at.hour < max(self.hour))
if execute_today:
next_hour = min(hour for hour in self.hour
if hour > last_run_at.hour)
delta = ffwd(hour=next_hour, minute=next_minute,
second=0, microsecond=0)
else:
next_hour = min(self.hour)
all_dom_moy = (self._orig_day_of_month == '*' and
self._orig_month_of_year == '*')
if all_dom_moy:
next_day = min([day for day in self.day_of_week
if day > dow_num] or self.day_of_week)
add_week = next_day == dow_num
delta = ffwd(
weeks=add_week and 1 or 0,
weekday=(next_day - 1) % 7,
hour=next_hour,
minute=next_minute,
second=0,
microsecond=0,
)
else:
delta = self._delta_to_next(last_run_at,
next_hour, next_minute)
return self.to_local(last_run_at), delta, self.to_local(now)
def remaining_estimate(self, last_run_at, ffwd=ffwd):
"""Estimate of next run time.
Returns when the periodic task should run next as a
:class:`~datetime.timedelta`.
"""
# pylint: disable=redefined-outer-name
# caching global ffwd
return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd))
def is_due(self, last_run_at):
"""Return tuple of ``(is_due, next_time_to_run)``.
Note:
Next time to run is in seconds.
SeeAlso:
:meth:`celery.schedules.schedule.is_due` for more information.
"""
rem_delta = self.remaining_estimate(last_run_at)
rem = max(rem_delta.total_seconds(), 0)
due = rem == 0
if due:
rem_delta = self.remaining_estimate(self.now())
rem = max(rem_delta.total_seconds(), 0)
return schedstate(due, rem)
def __eq__(self, other):
if isinstance(other, crontab):
return (
other.month_of_year == self.month_of_year and
other.day_of_month == self.day_of_month and
other.day_of_week == self.day_of_week and
other.hour == self.hour and
other.minute == self.minute
)
return NotImplemented
def __ne__(self, other):
res = self.__eq__(other)
if res is NotImplemented:
return True
return not res
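# Illustrative sketch of the semantics documented in the crontab docstring above
# (the concrete schedules are arbitrary examples): every other minute, and 7:30 on
# the first Monday of each quarter. String specs expand to plain integer sets.
def _example_crontab_usage():
    every_other_minute = crontab(minute='*/2')
    first_monday_of_quarter = crontab(minute=30, hour=7,
                                      day_of_week='monday',
                                      day_of_month='1-7',
                                      month_of_year='1,4,7,10')
    assert every_other_minute.minute == set(range(0, 60, 2))
    assert first_monday_of_quarter.month_of_year == {1, 4, 7, 10}
    # day_of_week names are parsed too ('monday' -> {1}, with Sunday == 0).
    return every_other_minute, first_monday_of_quarter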
def maybe_schedule(s, relative=False, app=None):
"""Return schedule from number, timedelta, or actual schedule."""
if s is not None:
if isinstance(s, numbers.Number):
s = timedelta(seconds=s)
if isinstance(s, timedelta):
return schedule(s, relative, app=app)
else:
s.app = app
return s
@python_2_unicode_compatible
class solar(BaseSchedule):
"""Solar event.
A solar event can be used as the ``run_every`` value of a
periodic task entry to schedule based on certain solar events.
Notes:
Available event values are:
- ``dawn_astronomical``
- ``dawn_nautical``
- ``dawn_civil``
- ``sunrise``
- ``solar_noon``
- ``sunset``
- ``dusk_civil``
- ``dusk_nautical``
- ``dusk_astronomical``
Arguments:
event (str): Solar event that triggers this task.
See note for available values.
lat (int): The latitude of the observer.
lon (int): The longitude of the observer.
nowfun (Callable): Function returning the current date and time
as a class:`~datetime.datetime`.
app (~@Celery): Celery app instance.
"""
_all_events = {
'dawn_astronomical',
'dawn_nautical',
'dawn_civil',
'sunrise',
'solar_noon',
'sunset',
'dusk_civil',
'dusk_nautical',
'dusk_astronomical',
}
_horizons = {
'dawn_astronomical': '-18',
'dawn_nautical': '-12',
'dawn_civil': '-6',
'sunrise': '-0:34',
'solar_noon': '0',
'sunset': '-0:34',
'dusk_civil': '-6',
'dusk_nautical': '-12',
'dusk_astronomical': '18',
}
_methods = {
'dawn_astronomical': 'next_rising',
'dawn_nautical': 'next_rising',
'dawn_civil': 'next_rising',
'sunrise': 'next_rising',
'solar_noon': 'next_transit',
'sunset': 'next_setting',
'dusk_civil': 'next_setting',
'dusk_nautical': 'next_setting',
'dusk_astronomical': 'next_setting',
}
_use_center_l = {
'dawn_astronomical': True,
'dawn_nautical': True,
'dawn_civil': True,
'sunrise': False,
'solar_noon': True,
'sunset': False,
'dusk_civil': True,
'dusk_nautical': True,
'dusk_astronomical': True,
}
def __init__(self, event, lat, lon, **kwargs):
self.ephem = __import__('ephem')
self.event = event
self.lat = lat
self.lon = lon
super(solar, self).__init__(**kwargs)
if event not in self._all_events:
raise ValueError(SOLAR_INVALID_EVENT.format(
event=event, all_events=', '.join(sorted(self._all_events)),
))
if lat < -90 or lat > 90:
raise ValueError(SOLAR_INVALID_LATITUDE.format(lat=lat))
if lon < -180 or lon > 180:
raise ValueError(SOLAR_INVALID_LONGITUDE.format(lon=lon))
cal = self.ephem.Observer()
cal.lat = str(lat)
cal.lon = str(lon)
cal.elev = 0
cal.horizon = self._horizons[event]
cal.pressure = 0
self.cal = cal
self.method = self._methods[event]
self.use_center = self._use_center_l[event]
def __reduce__(self):
return self.__class__, (self.event, self.lat, self.lon)
def __repr__(self):
return '<solar: {0} at latitude {1}, longitude: {2}>'.format(
self.event, self.lat, self.lon,
)
def remaining_estimate(self, last_run_at):
"""Return estimate of next time to run.
Returns:
~datetime.timedelta: when the periodic task should
run next, or if it shouldn't run today (e.g., the sun does
not rise today), returns the time when the next check
should take place.
"""
last_run_at = self.maybe_make_aware(last_run_at)
last_run_at_utc = localize(last_run_at, timezone.utc)
self.cal.date = last_run_at_utc
try:
next_utc = getattr(self.cal, self.method)(
| |
Parameters:
- path
"""
self.send_listStatus(path)
return self.recv_listStatus()
def send_listStatus(self, path):
self._oprot.writeMessageBegin('listStatus', TMessageType.CALL, self._seqid)
args = listStatus_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_listStatus(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = listStatus_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "listStatus failed: unknown result");
def chmod(self, path, mode):
"""
Parameters:
- path
- mode
"""
self.send_chmod(path, mode)
self.recv_chmod()
def send_chmod(self, path, mode):
self._oprot.writeMessageBegin('chmod', TMessageType.CALL, self._seqid)
args = chmod_args()
args.path = path
args.mode = mode
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chmod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chmod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def chown(self, path, owner, group):
"""
Parameters:
- path
- owner
- group
"""
self.send_chown(path, owner, group)
self.recv_chown()
def send_chown(self, path, owner, group):
self._oprot.writeMessageBegin('chown', TMessageType.CALL, self._seqid)
args = chown_args()
args.path = path
args.owner = owner
args.group = group
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def setReplication(self, path, replication):
"""
Parameters:
- path
- replication
"""
self.send_setReplication(path, replication)
self.recv_setReplication()
def send_setReplication(self, path, replication):
self._oprot.writeMessageBegin('setReplication', TMessageType.CALL, self._seqid)
args = setReplication_args()
args.path = path
args.replication = replication
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setReplication(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setReplication_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def getFileBlockLocations(self, path, start, length):
"""
Parameters:
- path
- start
- length
"""
self.send_getFileBlockLocations(path, start, length)
return self.recv_getFileBlockLocations()
def send_getFileBlockLocations(self, path, start, length):
self._oprot.writeMessageBegin('getFileBlockLocations', TMessageType.CALL, self._seqid)
args = getFileBlockLocations_args()
args.path = path
args.start = start
args.length = length
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getFileBlockLocations(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getFileBlockLocations_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "getFileBlockLocations failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["setInactivityTimeoutPeriod"] = Processor.process_setInactivityTimeoutPeriod
self._processMap["shutdown"] = Processor.process_shutdown
self._processMap["create"] = Processor.process_create
self._processMap["createFile"] = Processor.process_createFile
self._processMap["open"] = Processor.process_open
self._processMap["append"] = Processor.process_append
self._processMap["write"] = Processor.process_write
self._processMap["read"] = Processor.process_read
self._processMap["close"] = Processor.process_close
self._processMap["rm"] = Processor.process_rm
self._processMap["rename"] = Processor.process_rename
self._processMap["mkdirs"] = Processor.process_mkdirs
self._processMap["exists"] = Processor.process_exists
self._processMap["stat"] = Processor.process_stat
self._processMap["listStatus"] = Processor.process_listStatus
self._processMap["chmod"] = Processor.process_chmod
self._processMap["chown"] = Processor.process_chown
self._processMap["setReplication"] = Processor.process_setReplication
self._processMap["getFileBlockLocations"] = Processor.process_getFileBlockLocations
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_setInactivityTimeoutPeriod(self, seqid, iprot, oprot):
args = setInactivityTimeoutPeriod_args()
args.read(iprot)
iprot.readMessageEnd()
result = setInactivityTimeoutPeriod_result()
self._handler.setInactivityTimeoutPeriod(args.periodInSeconds)
oprot.writeMessageBegin("setInactivityTimeoutPeriod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
result = shutdown_result()
self._handler.shutdown(args.status)
oprot.writeMessageBegin("shutdown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_create(self, seqid, iprot, oprot):
args = create_args()
args.read(iprot)
iprot.readMessageEnd()
result = create_result()
try:
result.success = self._handler.create(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("create", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createFile(self, seqid, iprot, oprot):
args = createFile_args()
args.read(iprot)
iprot.readMessageEnd()
result = createFile_result()
try:
result.success = self._handler.createFile(args.path, args.mode, args.overwrite, args.bufferSize, args.block_replication, args.blocksize)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("createFile", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_open(self, seqid, iprot, oprot):
args = open_args()
args.read(iprot)
iprot.readMessageEnd()
result = open_result()
try:
result.success = self._handler.open(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("open", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
args = append_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_result()
try:
result.success = self._handler.append(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("append", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_write(self, seqid, iprot, oprot):
args = write_args()
args.read(iprot)
iprot.readMessageEnd()
result = write_result()
try:
result.success = self._handler.write(args.handle, args.data)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("write", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_read(self, seqid, iprot, oprot):
args = read_args()
args.read(iprot)
iprot.readMessageEnd()
result = read_result()
try:
result.success = self._handler.read(args.handle, args.offset, args.size)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("read", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_close(self, seqid, iprot, oprot):
args = close_args()
args.read(iprot)
iprot.readMessageEnd()
result = close_result()
try:
result.success = self._handler.close(args.out)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("close", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rm(self, seqid, iprot, oprot):
args = rm_args()
args.read(iprot)
iprot.readMessageEnd()
result = rm_result()
try:
result.success = self._handler.rm(args.path, args.recursive)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("rm", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rename(self, seqid, iprot, oprot):
args = rename_args()
args.read(iprot)
iprot.readMessageEnd()
result = rename_result()
try:
result.success = self._handler.rename(args.path, args.dest)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("rename", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mkdirs(self, seqid, iprot, oprot):
args = mkdirs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mkdirs_result()
try:
result.success = self._handler.mkdirs(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("mkdirs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_exists(self, seqid, iprot, oprot):
args = exists_args()
args.read(iprot)
iprot.readMessageEnd()
result = exists_result()
try:
result.success = self._handler.exists(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("exists", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stat(self, seqid, iprot, oprot):
args = stat_args()
args.read(iprot)
iprot.readMessageEnd()
result = stat_result()
try:
result.success = self._handler.stat(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("stat", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_listStatus(self, seqid, iprot, oprot):
args = listStatus_args()
args.read(iprot)
iprot.readMessageEnd()
result = listStatus_result()
try:
result.success = self._handler.listStatus(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("listStatus", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chmod(self, seqid, iprot, oprot):
args = chmod_args()
args.read(iprot)
iprot.readMessageEnd()
result = chmod_result()
try:
self._handler.chmod(args.path, args.mode)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("chmod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chown(self, seqid, iprot, oprot):
args = chown_args()
args.read(iprot)
iprot.readMessageEnd()
result = chown_result()
try:
self._handler.chown(args.path, args.owner, args.group)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("chown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setReplication(self, seqid, iprot, oprot):
args = setReplication_args()
args.read(iprot)
iprot.readMessageEnd()
result = setReplication_result()
try:
self._handler.setReplication(args.path, args.replication)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("setReplication", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getFileBlockLocations(self, seqid, iprot, oprot):
args = getFileBlockLocations_args()
args.read(iprot)
iprot.readMessageEnd()
result = getFileBlockLocations_result()
try:
result.success = self._handler.getFileBlockLocations(args.path, args.start, args.length)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("getFileBlockLocations", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class setInactivityTimeoutPeriod_args:
"""
Attributes:
- periodInSeconds
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'periodInSeconds', None, None, ), # 1
)
def __init__(self, periodInSeconds=None,):
self.periodInSeconds = periodInSeconds
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.periodInSeconds = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setInactivityTimeoutPeriod_args')
if self.periodInSeconds != None:
oprot.writeFieldBegin('periodInSeconds', TType.I64, 1)
oprot.writeI64(self.periodInSeconds)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setInactivityTimeoutPeriod_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setInactivityTimeoutPeriod_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_args:
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'status', None, None, ), # 1
<reponame>hatemhosny/jupytext
"""ContentsManager that allows to open Rmd, py, R and ipynb files as notebooks
"""
import os
from datetime import timedelta
import nbformat
import mock
from tornado.web import HTTPError
from traitlets import Unicode, Float, Bool, Enum
from traitlets.config import Configurable
# import notebook.transutils before notebook.services.contents.filemanager #75
try:
import notebook.transutils # noqa
except ImportError:
pass
try:
from notebook.services.contents.largefilemanager import LargeFileManager
except ImportError:
# Older versions of notebook do not have the LargeFileManager #217
from notebook.services.contents.filemanager import FileContentsManager as LargeFileManager
from .jupytext import reads, writes, create_prefix_dir
from .combine import combine_inputs_with_outputs
from .formats import rearrange_jupytext_metadata, check_file_version
from .formats import NOTEBOOK_EXTENSIONS, long_form_one_format, long_form_multiple_formats
from .formats import short_form_one_format, short_form_multiple_formats
from .paired_paths import paired_paths, find_base_path_and_format, base_path, full_path, InconsistentPath
from .kernels import set_kernelspec_from_language
def preferred_format(incomplete_format, preferred_formats):
"""Return the preferred format for the given extension"""
incomplete_format = long_form_one_format(incomplete_format)
if 'format_name' in incomplete_format:
return incomplete_format
for fmt in long_form_multiple_formats(preferred_formats):
if ((incomplete_format['extension'] == fmt['extension'] or (
fmt['extension'] == '.auto' and
incomplete_format['extension'] not in ['.md', '.Rmd', '.ipynb'])) and
incomplete_format.get('suffix') == fmt.get('suffix', incomplete_format.get('suffix')) and
incomplete_format.get('prefix') == fmt.get('prefix', incomplete_format.get('prefix'))):
fmt.update(incomplete_format)
return fmt
return incomplete_format
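# Illustrative sketch (not part of the original source) of how the resolution
# above plays out: an incomplete format such as '.py' picks up the format name
# of the first compatible entry in the preferred list, e.g.
#   preferred_format('.py', 'jl:percent,py:percent,R:percent')
#   # -> {'extension': '.py', 'format_name': 'percent', ...}
# while an extension with no matching preference is returned unchanged.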
def _jupytext_writes(fmt):
def _writes(nbk, version=nbformat.NO_CONVERT, **kwargs):
return writes(nbk, fmt, version=version, **kwargs)
return _writes
def _jupytext_reads(fmt):
def _reads(text, as_version, **kwargs):
return reads(text, fmt, as_version=as_version, **kwargs)
return _reads
class TextFileContentsManager(LargeFileManager, Configurable):
"""
A FileContentsManager Class that reads and stores notebooks to classical
Jupyter notebooks (.ipynb), R Markdown notebooks (.Rmd), Julia (.jl),
Python (.py) or R scripts (.R)
"""
# Dictionary: notebook path => (fmt, formats) where fmt is the current format, and formats the paired formats.
paired_notebooks = dict()
def all_nb_extensions(self):
"""All extensions that should be classified as notebooks"""
return [ext if ext.startswith('.') else '.' + ext for ext in self.notebook_extensions.split(',')]
default_jupytext_formats = Unicode(
u'',
help='Save notebooks to these file extensions. '
'Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R '
'comma separated. If you want another format than the '
'default one, append the format name to the extension, '
'e.g. ipynb,py:percent to save the notebook to '
'hydrogen/spyder/vscode compatible scripts',
config=True)
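# Example (illustrative, not from the original file): these traits are normally
# set through the Jupyter configuration, e.g. in jupyter_notebook_config.py; the
# exact configurable class names below are an assumption based on typical usage:
#   c.NotebookApp.contents_manager_class = "jupytext.TextFileContentsManager"
#   c.ContentsManager.default_jupytext_formats = "ipynb,py:percent"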
preferred_jupytext_formats_save = Unicode(
u'',
help='Preferred format when saving notebooks as text, per extension. '
'Use "jl:percent,py:percent,R:percent" if you want to save '
'Julia, Python and R scripts in the double percent format and '
'only write "jupytext_formats": "py" in the notebook metadata.',
config=True)
preferred_jupytext_formats_read = Unicode(
u'',
help='Preferred format when reading notebooks from text, per '
'extension. Use "py:sphinx" if you want to read all python '
'scripts as Sphinx gallery scripts.',
config=True)
default_notebook_metadata_filter = Unicode(
u'',
help="Notebook metadata that should be saved in the text representations. "
"Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
config=True)
default_cell_metadata_filter = Unicode(
u'',
help="Cell metadata that should be saved in the text representations. "
"Examples: 'all', 'hide_input,hide_output'",
config=True)
comment_magics = Enum(
values=[True, False],
allow_none=True,
help='Should Jupyter magic commands be commented out in the text representation?',
config=True)
split_at_heading = Bool(
False,
help='Split markdown cells on headings (Markdown and R Markdown formats only)',
config=True)
sphinx_convert_rst2md = Bool(
False,
help='When opening a Sphinx Gallery script, convert the reStructuredText to markdown',
config=True)
outdated_text_notebook_margin = Float(
1.0,
help='Refuse to overwrite the inputs of an ipynb notebook with those of a '
'text notebook when the text notebook plus margin is older than '
'the ipynb notebook',
config=True)
default_cell_markers = Unicode(
u'',
help='Start and end cell markers for the light format, comma separated. Use "{{{,}}}" to mark cells'
' as foldable regions in Vim, and "region,endregion" to mark cells as Vscode/PyCharm regions',
config=True)
notebook_extensions = Unicode(
u','.join(NOTEBOOK_EXTENSIONS),
help='A comma separated list of notebook extensions',
config=True)
def drop_paired_notebook(self, path):
"""Remove the current notebook from the list of paired notebooks"""
if path not in self.paired_notebooks:
return
fmt, formats = self.paired_notebooks.pop(path)
prev_paired_paths = paired_paths(path, fmt, formats)
for alt_path, _ in prev_paired_paths:
if alt_path in self.paired_notebooks:
self.drop_paired_notebook(alt_path)
def update_paired_notebooks(self, path, fmt, formats):
"""Update the list of paired notebooks to include/update the current pair"""
if not formats:
self.drop_paired_notebook(path)
return
new_paired_paths = paired_paths(path, fmt, formats)
for alt_path, _ in new_paired_paths:
self.drop_paired_notebook(alt_path)
long_formats = long_form_multiple_formats(formats)
if len(long_formats) == 1 and set(long_formats[0]) <= {'extension'}:
return
short_formats = short_form_multiple_formats(formats)
for alt_path, alt_fmt in new_paired_paths:
self.paired_notebooks[alt_path] = short_form_one_format(alt_fmt), short_formats
def set_default_format_options(self, format_options, read=False):
"""Set default format option"""
if self.default_notebook_metadata_filter:
format_options.setdefault('notebook_metadata_filter', self.default_notebook_metadata_filter)
if self.default_cell_metadata_filter:
format_options.setdefault('cell_metadata_filter', self.default_cell_metadata_filter)
if self.comment_magics is not None:
format_options.setdefault('comment_magics', self.comment_magics)
if self.split_at_heading:
format_options.setdefault('split_at_heading', self.split_at_heading)
if not read and self.default_cell_markers:
format_options.setdefault('cell_markers', self.default_cell_markers)
if read and self.sphinx_convert_rst2md:
format_options.setdefault('rst2md', self.sphinx_convert_rst2md)
def default_formats(self, path):
"""Return the default formats, if they apply to the current path #157"""
formats = long_form_multiple_formats(self.default_jupytext_formats)
for fmt in formats:
try:
base_path(path, fmt)
return self.default_jupytext_formats
except InconsistentPath:
continue
return None
def create_prefix_dir(self, path, fmt):
"""Create the prefix dir, if missing"""
create_prefix_dir(self._get_os_path(path.strip('/')), fmt)
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
if model['type'] != 'notebook':
return super(TextFileContentsManager, self).save(model, path)
nbk = model['content']
try:
metadata = nbk.get('metadata')
rearrange_jupytext_metadata(metadata)
jupytext_metadata = metadata.setdefault('jupytext', {})
jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path)
if not jupytext_formats:
text_representation = jupytext_metadata.get('text_representation', {})
ext = os.path.splitext(path)[1]
fmt = {'extension': ext}
if ext == text_representation.get('extension') and text_representation.get('format_name'):
fmt['format_name'] = text_representation.get('format_name')
jupytext_formats = [fmt]
jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata)
# Set preferred formats if no format name is given yet
jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats]
base, fmt = find_base_path_and_format(path, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
self.set_default_format_options(jupytext_metadata)
if not jupytext_metadata:
metadata.pop('jupytext')
# Save as ipynb first
return_value = None
value = None
for fmt in jupytext_formats[::-1]:
if fmt['extension'] != '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
self.log.info("Saving %s", os.path.basename(alt_path))
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# And then to the other formats, in reverse order so that
# the first format is the most recent
for fmt in jupytext_formats[::-1]:
if fmt['extension'] == '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
if 'format_name' in fmt and fmt['extension'] not in ['.Rmd', '.md']:
self.log.info("Saving %s in format %s:%s",
os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name'])
else:
self.log.info("Saving %s", os.path.basename(alt_path))
with mock.patch('nbformat.writes', _jupytext_writes(fmt)):
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# Update modified timestamp to match that of the pair #207
return_value['last_modified'] = value['last_modified']
return return_value
except Exception as err:
raise HTTPError(400, str(err))
def get(self, path, content=True, type=None, format=None, load_alternative_format=True):
""" Takes a path for an entity and returns its model"""
path = path.strip('/')
os_path = self._get_os_path(path)
ext = os.path.splitext(path)[1]
# Not a notebook?
if (not self.exists(path) or os.path.isdir(os_path) or
(type != 'notebook' if type else ext not in self.all_nb_extensions())):
return super(TextFileContentsManager, self).get(path, content, type, format)
fmt = preferred_format(ext, self.preferred_jupytext_formats_read)
if ext == '.ipynb':
model = self._notebook_model(path, content=content)
else:
self.set_default_format_options(fmt, read=True)
with mock.patch('nbformat.reads', _jupytext_reads(fmt)):
model = self._notebook_model(path, content=content)
if not load_alternative_format:
return model
if not content:
# Modification time of a paired notebook, in this context - Jupyter is checking timestamp
# before saving - is the most recent among all representations #118
if path not in self.paired_notebooks:
return model
fmt, formats = self.paired_notebooks.get(path)
for alt_path, _ in paired_paths(path, fmt, formats):
if alt_path != path and self.exists(alt_path):
alt_model = self._notebook_model(alt_path, content=False)
if alt_model['last_modified'] > model['last_modified']:
model['last_modified'] = alt_model['last_modified']
return model
# We will now read a second file if this is a paired notebook.
nbk = model['content']
jupytext_formats = nbk.metadata.get('jupytext', {}).get('formats') or self.default_formats(path)
jupytext_formats = long_form_multiple_formats(jupytext_formats)
# Compute paired notebooks from formats
alt_paths = [(path, fmt)]
if jupytext_formats:
try:
_, fmt = find_base_path_and_format(path, jupytext_formats)
alt_paths = paired_paths(path, fmt, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
except InconsistentPath as err:
self.log.info("Unable to read paired notebook: %s", str(err))
else:
if path in self.paired_notebooks:
fmt, formats = self.paired_notebooks.get(path)
alt_paths = paired_paths(path, fmt, formats)
if len(alt_paths) > 1 and ext == '.ipynb':
# Apply default options (like saving and reloading would do)
jupytext_metadata = model['content']['metadata'].get('jupytext', {})
self.set_default_format_options(jupytext_metadata, read=True)
if jupytext_metadata:
model['content']['metadata']['jupytext'] = jupytext_metadata
org_model = model
fmt_inputs = fmt
path_inputs = path_outputs = path
model_outputs = None
# Source format is first non ipynb format found on disk
if path.endswith('.ipynb'):
for alt_path, alt_fmt in alt_paths:
if not alt_path.endswith('.ipynb') and self.exists(alt_path):
self.log.info(u'Reading SOURCE from {}'.format(alt_path))
path_inputs = alt_path
fmt_inputs = alt_fmt
model_outputs = model
model = self.get(alt_path, content=content, type=type, format=format,
load_alternative_format=False)
break
# Outputs are taken from the ipynb file, if it is in the group and the file exists
else:
for alt_path, _ in alt_paths:
for files. On Windows this is merely an adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
Works around the absence of os.lutime;
works only on Linux and OSX ATM
"""
from .cmd import Runner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
Runner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
rfilepath = realpath(filepath)
if islink(filepath) and exists(rfilepath):
# trust no one - also adjust the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(rfilepath, (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def assure_tuple_or_list(obj):
"""Given an object, wrap it into a tuple if it is not already a list or a tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def assure_list(s, copy=False, iterate=True):
"""Given a non-list, place it into a list. If None, an empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a text_type)
iterate over it.
"""
if isinstance(s, list):
return s if not copy else s[:]
elif isinstance(s, text_type):
return [s]
elif iterate and hasattr(s, '__iter__'):
return list(s)
elif s is None:
return []
else:
return [s]
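# Illustrative examples (not part of the original source):
#   assure_list(None)               # -> []
#   assure_list('abc')              # -> ['abc']  (text is not iterated)
#   assure_list(('a', 'b'))         # -> ['a', 'b']
#   assure_list([1, 2], copy=True)  # -> a shallow copy [1, 2]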
def assure_list_from_str(s, sep='\n'):
"""Given a multiline string, convert it to a list, or return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def assure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in assure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
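# Illustrative examples (not part of the original source):
#   assure_dict_from_str("a=1\nb=2")  # -> {'a': '1', 'b': '2'}
#   assure_dict_from_str("")          # -> None
#   assure_dict_from_str("a=1\na=2")  # -> raises ValueError (duplicate key)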
def assure_unicode(s, encoding='utf-8'):
"""Convert/decode to unicode (PY2) or str (PY3) if of 'binary_type'"""
return s.decode(encoding) if isinstance(s, binary_type) else s
def assure_bool(s):
"""Convert a value into a boolean, following the convention for strings
of recognizing on,True,yes as True and off,False,no as False
"""
if isinstance(s, string_types):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
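# Illustrative examples (not part of the original source):
#   assure_bool('yes')  # -> True
#   assure_bool('off')  # -> False
#   assure_bool('1')    # -> True   (digit strings go through int())
#   assure_bool(0)      # -> False  (non-strings fall back to bool())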
def unique(seq, key=None):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so that uniqueness can be decided not on
the full element, but on one of its members etc
"""
seen = set()
seen_add = seen.add
if not key:
return [x for x in seq if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
return [x for x in seq if not (key(x) in seen or seen_add(key(x)))]
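# Illustrative examples (not part of the original source):
#   unique([1, 2, 1, 3])                            # -> [1, 2, 3]
#   unique(['ab', 'cd', 'ax'], key=lambda x: x[0])  # -> ['ab', 'cd']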
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator, return two generators, where the 2nd one just replays.
The first one goes through the generated items and saves them, and the 2nd one
yields the saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
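# Illustrative usage (not part of the original source):
#   gen1, gen2 = saved_generator(iter(range(3)))
#   list(gen1)  # -> [0, 1, 2]  consumes the original generator, saving items
#   list(gen2)  # -> [0, 1, 2]  replays the saved items without re-generating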
#
# Decorators
#
def better_wraps(to_be_wrapped):
"""Decorator to replace `functools.wraps`
This is based on `wrapt` instead of `functools` and in opposition to `wraps`
preserves the correct signature of the decorated function.
It is written with the intention to replace the use of `wraps` without any
need to rewrite the actual decorators.
"""
@wrapt.decorator(adapter=to_be_wrapped)
def intermediator(to_be_wrapper, instance, args, kwargs):
return to_be_wrapper(*args, **kwargs)
return intermediator
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], collections.Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
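# Illustrative sketch (not part of the original source): a decorator wrapped with
# @optional_args can be applied either bare or with keyword arguments.
#   @optional_args
#   def tag(f, label="default"):
#       def wrapped(*args, **kwargs):
#           return (label, f(*args, **kwargs))
#       return wrapped
#
#   @tag                   # bare: decorator(f) is called directly -> one() == ('default', 1)
#   def one(): return 1
#
#   @tag(label="custom")   # with options: decorator(f, label="custom") -> two() == ('custom', 2)
#   def two(): return 2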
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Update the kwargs to be passed to tempfile.* calls, depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('DATALAD_TESTS_TEMP_DIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def newfunc(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return newfunc
#
# Context Managers
#
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmatically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consume both stdout and stderr, and print().
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to avoid the side-effects of swallowing
into a StringIO, which lacks .fileno.
Mocking print is necessary for some uses where sys.stdout was already bound
to the original sys.stdout, so mocking it later had no effect; overriding
the print function has the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = open(tempfile.mktemp(**kw), 'w')
self._err = open(tempfile.mktemp(**kw), 'w')
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
self._out.flush()
return self._read(self._out)
@property
def err(self):
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
sys.stdout.write(sep.join(args) + end)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(__builtin__, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(__builtin__, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(__builtin__, 'print', oldprint)
adapter.cleanup()
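# Illustrative usage (not part of the original source):
#   with swallow_outputs() as cmo:
#       print("hello")
#       assert cmo.out == "hello\n"
#       assert cmo.err == ""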
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
out_file = tempfile.mktemp(**kw)
else:
out_file = file_
# PY3 clearly requires one or the other; a race condition is possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
]
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (args.adam_beta1, args.adam_beta2),
"eps": args.adam_epsilon,
}
optimizer_kwargs["lr"] = args.learning_rate
return optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
def convert_example_to_features(example, tokenizer, max_seq_length, doc_stride, max_query_length):
Feature = collections.namedtuple(
"Feature",
[
"unique_id",
"tokens",
"example_index",
"token_to_orig_map",
"token_is_max_context",
],
)
extra = []
unique_id = 0
query_tokens = tokenizer.tokenize(example["question"])[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example["context"]):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(
doc_spans, doc_span_index, split_token_index
)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
feature = Feature(
unique_id=unique_id,
tokens=tokens,
example_index=0,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
)
extra.append(feature)
unique_id += 1
# extra is used as additional data but sparseml doesn't support it
return (
torch.from_numpy(np.array([np.array(input_ids, dtype=np.int64)])),
torch.from_numpy(np.array([np.array(input_mask, dtype=np.int64)])),
torch.from_numpy(np.array([np.array(segment_ids, dtype=np.int64)])),
)
def _check_is_max_context(doc_spans, cur_span_index, position):
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
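# Illustrative worked example (not part of the original source): with doc spans
# DocSpan(start=0, length=4) and DocSpan(start=2, length=4), token position 3 is
# covered by both. In the first span its score is min(3, 0) + 0.01*4 = 0.04; in
# the second it is min(1, 2) + 0.01*4 = 1.04, so the second span is the one where
# position 3 has maximum context and _check_is_max_context returns True only for
# cur_span_index == 1.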
def drop_layers(model, layers_to_keep):
layer_drop_matching = {
1:[0],
3:[0,5,11],
6:[0,2,4,6,8,11],
9:[0,2,3,4,5,7,8,9,11],
12:[0,1,2,3,4,5,6,7,8,9,10,11],
}
encoder_layers = model.bert.encoder.layer # change based on model name
assert layers_to_keep <= len(encoder_layers)
assert layers_to_keep in layer_drop_matching.keys()
trimmed_encoder_layers = nn.ModuleList()
for i in layer_drop_matching[layers_to_keep]:
trimmed_encoder_layers.append(encoder_layers[i])
trimmed_model = copy.deepcopy(model)
trimmed_model.bert.encoder.layer = trimmed_encoder_layers
return trimmed_model
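# Illustrative sketch (not part of the original source): for a 12-layer BERT
# model, keeping 6 layers retains encoder layers [0, 2, 4, 6, 8, 11] per the
# mapping above, e.g.
#   smaller = drop_layers(model, layers_to_keep=6)
#   assert len(smaller.bert.encoder.layer) == 6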
####################################################################################
# End SparseML Integration
####################################################################################
def main():
### Dataset processing classes in main due to hugging face custom dataset map
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=data_args.max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (
offsets[token_start_index][0] <= start_char
and offsets[token_end_index][1] >= end_char
):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while (
token_start_index < len(offsets)
and offsets[token_start_index][0] <= start_char
):
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Post-processing:
def post_processing_function(examples, features, predictions):
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
is_world_process_zero=trainer.is_world_process_zero(),
)
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0}
for k, v in predictions.items()
]
else:
formatted_predictions = [
{"id": k, "prediction_text": v} for k, v in predictions.items()
]
references = [
{"id": ex["id"], "answers": ex[answer_column_name]}
for ex in datasets["validation"]
]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Validation preprocessing
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=data_args.max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
transformers.utils.logging.set_verbosity_info()
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
" Use --overwrite_output_dir to overcome."
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
)
logger.setLevel(
logging.INFO if is_main_process(training_args.local_rank) else logging.WARN
)
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
set_seed(training_args.seed)
if data_args.dataset_name is not None:
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files, field="data")
config = AutoConfig.from_pretrained(
model_args.config_name
if model_args.config_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name
if model_args.tokenizer_name
else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if data_args.layers_to_keep > 0:
logger.info("Keeping %s model layers", data_args.layers_to_keep)
| |
preset_defines = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
preset_values = None
''' '''
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetHairDynamics(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_defines = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
preset_values = None
''' '''
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetInterfaceTheme(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetKeyconfig(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
def add(self, _context, filepath):
'''
'''
pass
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def post_cb(self, context):
'''
'''
pass
def pre_cb(self, context):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetNodeColor(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_defines = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
preset_values = None
''' '''
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetOperator(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_defines = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
preset_values = None
''' '''
def as_filename(self, name):
'''
'''
pass
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def check(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def operator_path(self, operator):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class AddPresetRender(AddPresetBase, bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
preset_defines = None
''' '''
preset_menu = None
''' '''
preset_subdir = None
''' '''
preset_values = None
| |
arg_default
# If a relative path has been provided, either in the environment
# variable or as the default, that path needs to be appended to the
# module path. Since all full paths begin with a path delimiter,
# this is a valid test
if my_tmp[0] != "/":
my_tmp = (
PathUtils.include_trailing_path_delimiter(self.module_dir)
+ my_tmp
)
# OK, here is where it gets a bit tricky: when passed on the command
# line the path is calculated from the current directory, in all other
# cases from the module path. Since the app should be in the initial
# directory, calculating the real path should resolve to a fully
# qualified path. To remove all indirection, use the real path.
my_tmp = PathUtils.real_path(my_tmp)
# At this point the path should be a fully qualified path
# Msg.user( "Result Path: %s" % ( str( my_tmp )))
#
Msg.user("Result Path: %s" % (str(my_tmp)))
if not PathUtils.valid_path(my_tmp):
raise FileNotFoundError(
"Initial Directory for %s Resolution Failed[%s] could not be"
"located" % (arg_cmd_switch, my_tmp)
)
if not PathUtils.check_exe(my_tmp):
my_tmp = PathUtils.include_trailing_path_delimiter(my_tmp)
return my_tmp
# set up the output directory and archive any existing directories
def initialize_output(self):
self.mode = self.option_def(CmdLine.Switches[CmdLine.mode], None)
self.output_root = PathUtils.exclude_trailing_path_delimiter(
self.option_def(
CmdLine.Switches[CmdLine.target_dir], PathUtils.current_dir()
)
)
Msg.user("Output Root: %s" % (str(self.output_root)))
# check launcher type here since we need to know if we are running
# with LSF
Msg.user("Before Launcher Type", "MASTERRUN")
self.launcher_type = self.option_def(
CmdLine.Switches[CmdLine.run_launcher], Defaults.run_launcher
)
Msg.user("Launcher Type: %s" % (str(self.launcher_type)), "MASTERRUN")
# OK, the root output directory has been established.
# Next, check to see if there is an expiration; if there is, handle that
# and exit the session
my_expire = self.option_def(CmdLine.Switches[CmdLine.expire], None)
my_session_type = (
Formats.perf_output_dir
if SysUtils.found(self.mode.find(Modes.perf))
else Formats.regress_output_dir
)
if my_expire is not None:
self.handle_expire(my_expire, my_session_type)
raise Exception(
"Problem with handle_expire, should have terminated ....."
)
# Continuing, create the full output directory which, if it exists, should
# be archived or removed
my_output_base = Formats.main_output_dir % self.output_root
self.output_dir = "%s/%s/" % (
PathUtils.exclude_trailing_path_delimiter(my_output_base),
PathUtils.exclude_trailing_path_delimiter(my_session_type),
)
Msg.user("Target Output Dir: %s" % (str(self.output_dir)))
mod_time = None
# if there is no expire setting then handle any existing output directory
if PathUtils.check_dir(self.output_dir):
# check the modification time of the directory; if it was created very
# recently, delay a bit when running on LSF,
# since client machines might still hold a stale directory handle.
mod_time = PathUtils.time_modified(self.output_dir)
if self.option_def(
CmdLine.Switches[CmdLine.no_archive], Defaults.no_archive
):
PathUtils.rmdir(
self.output_dir, True
) # remove output directory tree
else:
PathUtils.archive_dir(self.output_dir)
PathUtils.mkdir(self.output_dir)
if mod_time is not None:
self.waitForLfs(mod_time)
return True
# Wait a bit for LSF to expire stale file handle for regression directory
# if running with LSF
def waitForLfs(self, aModTime):
if self.launcher_type == LauncherType.Lsf:
time_diff = int(DateTime.Time() - aModTime)
if time_diff < MasterRun.cLsfWaitTime:
sec_delay = MasterRun.cLsfWaitTime - time_diff
Msg.info(
"Using LSF, delaying %d seconds so that stale "
"output/regression file handle will expire..." % sec_delay
)
SysUtils.sleep_seconds_with_progress(sec_delay)
Msg.info("Waiting done, resumed master run")
def writeVersionInfo(self):
out_line_fmt = (
"{}, scm_system: {}, revision number: {}, location: {}, url: {}\n"
)
version_info = ""
for app_tag, app_config in self.m_app_info.mTagToApp.items():
Msg.user("app_tag: %s, app_config: %s" % (app_tag, app_config))
version_data = app_config.parameter("version")
for item in version_data:
if item["status"]:
version_info += out_line_fmt.format(
app_config.name(),
item["scm_type"],
str(item["version"]),
item["folder"],
item["url"],
)
with open(self.output_dir + "version_info.txt", "w+") as outfile:
if version_info:
outfile.write(version_info)
else:
outfile.write("No version information found")
# Call the report methods from each of the sequence apps. Some apps
# report, others pass through
def modulesReport(self):
for app_cfg in self.m_app_info.mSequenceApps:
reporter = app_cfg.createReporter()
reporter.report(self.m_app_info, app_cfg.tag())
@staticmethod
def to_int(a_value):
return int(a_value)
@staticmethod
def to_hex(a_value):
return hex(int(a_value, 0))
# populate the general options
def populate_options(self):
self.options[CtrlItmKeys.fdir] = self.fctrl_dir
self.options[CtrlItmKeys.no_sim] = self.option_def(
CmdLine.Switches[CmdLine.no_sim], CtrlItmDefs.no_sim
)
self.options[CtrlItmKeys.num_chips] = self.option_def(
CmdLine.Switches[CmdLine.num_chips],
CtrlItmDefs.num_chips,
self.to_int,
)
self.options[CtrlItmKeys.num_cores] = self.option_def(
CmdLine.Switches[CmdLine.num_cores],
CtrlItmDefs.num_cores,
self.to_int,
)
self.options[CtrlItmKeys.num_threads] = self.option_def(
CmdLine.Switches[CmdLine.num_threads],
CtrlItmDefs.num_threads,
self.to_int,
)
self.options[CtrlItmKeys.min_instr] = self.option_def(
CmdLine.Switches[CmdLine.min_instr],
CtrlItmDefs.min_instr,
self.to_int,
)
self.options[CtrlItmKeys.max_instr] = self.option_def(
CmdLine.Switches[CmdLine.max_instr],
CtrlItmDefs.max_instr,
self.to_int,
)
self.options[CtrlItmKeys.timeout] = self.option_def(
CmdLine.Switches[CmdLine.timeout], CtrlItmDefs.timeout, self.to_int
)
self.options[CtrlItmKeys.seed] = self.option_def(
CmdLine.Switches[CmdLine.seed], CtrlItmDefs.seed, self.to_hex
)
self.options[CtrlItmKeys.suffix] = Defaults.suffix
self.max_fails = self.option_def(
CmdLine.Switches[CmdLine.max_fails],
Defaults.max_fails,
self.to_int,
)
if self.max_fails > 0:
self.is_term_proc = self.query_terminated
self.on_fail_proc = self.handle_on_fail
def process_general_options(self):
# run options
self.num_runs = self.option_def(
CmdLine.Switches[CmdLine.num_runs], Defaults.num_runs, self.to_int
)
self.sum_level = self.option_def(
CmdLine.Switches[CmdLine.sum_level], SummaryLevel.Fail, self.to_int
)
Msg.user("process-max: %d" % (self.m_app_info.mProcessMax), "MASTER")
# create the proper summary
def initialize_summary(self):
my_keep = self.option_def(
CmdLine.Switches[CmdLine.keep], Defaults.keep
)
clean_up_rules = CleanUpRules(my_keep)
if SysUtils.found(self.mode.find(Modes.perf)):
self.mode = Modes.perf
self.options[CtrlItmKeys.no_sim] = True
self.summary = PerformanceSummary(self.output_dir, clean_up_rules)
elif SysUtils.found(self.mode.find(Modes.regress)):
self.mode = Modes.regress
self.summary = RegressionSummary(self.output_dir, clean_up_rules)
else:
self.mode = Modes.count
self.m_app_info.mMode = "count"
self.summary = RegressionSummary(self.output_dir, clean_up_rules)
if self.summary is not None:
self.summary.set_on_fail_proc(self.on_fail_proc)
self.summary.set_is_term_proc(self.is_term_proc)
def initialize_process_queue(self):
global workers_done_event
self.process_queue = ProcessQueue()
self.process_queue.process_cmd = self.process_cmd
self.process_queue.processor_name = self.processor_name
self.process_queue.summary = self.summary
self.process_queue.process_max = self.m_app_info.mProcessMax
self.process_queue.launcher_type = self.launcher_type
Msg.user("Done Event: %s" % (str(workers_done_event)), "MAIN")
self.process_queue.done_event = workers_done_event
# Msg.user( "Done Event: %s" % (str( workers_done_event)), "MAIN")
self.process_queue.open_queue()
# self.process_queue.open_queue(self.process_cmd, self.summary,
# self.process_max, workers_done_event,
# self.process_launcher )
def initialize_processor_cmd(self):
# the default task processor is "forrest_run.py"
# the default directory is the same directory as the master_run
# the processor can be replaced with a command line argument which may
# or may not contain a path
# if it does not contain a path then the default path will be used
        # if a directory is passed on the command line, in all cases that will
        # be the location of the processor
my_run_dir = None
my_run_name = None
my_tmp_name = None
my_tmp_path = None
my_run_path = self.option_def(CmdLine.Switches[CmdLine.run_name], None)
if my_run_path is not None:
my_run_dir, my_run_name = PathUtils.split_path(my_run_path)
Msg.user(
"Client Dir: %s, Client Name: %s (1)"
% (str(my_run_dir), str(my_run_name)),
"PROCESS_CMD",
)
if my_run_dir is None:
my_tmp_path = self.locate_directory(
CmdLine.Switches[CmdLine.run_dir],
EnVars.run_path,
self.module_dir,
)
if PathUtils.check_exe(my_tmp_path):
my_run_dir, my_tmp_name = PathUtils.split_path(my_tmp_path)
else:
my_run_dir = my_tmp_path
if my_run_name is None:
my_run_name = (
my_tmp_name if my_tmp_name is not None else Defaults.run_name
)
my_process_cmd = PathUtils.real_path(
PathUtils.append_path(
PathUtils.include_trailing_path_delimiter(my_run_dir),
my_run_name,
)
)
Msg.user("Process Cmd: %s (1)" % (str(my_process_cmd)), "PROCESS_CMD")
my_msg_lev = self.option_def(
CmdLine.Switches[CmdLine.client_lev], None
)
if my_msg_lev is not None:
if my_msg_lev:
my_process_cmd += Msg.get_level_as_str()
else:
my_process_cmd += " -l " + my_msg_lev
Msg.user(
"Process Cmd: %s" % (str(my_process_cmd)), "PROCESS_CMD"
)
if self.m_app_info.mConfigPath is not None:
my_process_cmd += " -w %s" % self.m_app_info.mConfigPath
my_process_cmd += " -f %s"
self.processor_name = my_run_name.replace(".py", "").replace(
"_run", ""
)
self.process_cmd = my_process_cmd
Msg.dbg("Process Cmd: %s" % (str(self.process_cmd)))
# Msg.dbg( "Process Cmd: %s" % (self.process_cmd))
    # An expire value has been found
    # There are several possibilities
    # 1. "clean" was passed on the command line; in this case remove the output
    #    directory, which should be the last directory that received output for
    #    that mode
    # 2. "purge" was passed on the command line; in this case purge all
    #    output directories for that mode
    # 3. a zero (0) was passed on the command line
    # 4. a non-zero integer was passed on the command line; purge all
    #    directories related to the mode
    # If none of these are found an exception is raised; in all cases
    # master_run terminates immediately
def handle_expire(self, arg_expire, arg_mask):
try:
Msg.user(
"Expire: %s, Mask: %s" % (str(arg_expire), str(arg_mask)),
"EXPIRE",
)
my_output_base = Formats.main_output_dir % self.output_root
Msg.user(
"Expire [1], Output Base: %s" % (str(my_output_base)), "EXPIRE"
)
Msg.info("Output Directories Cleanup, Please wait ...")
if int(arg_expire) == Expire.all:
Msg.info("Building Directory List, [%s]" % (my_output_base))
my_dirs = PathUtils.list_dirs(my_output_base)
Msg.dbg("All Dirs: %s" % (str(my_dirs)), "EXPIRE")
for my_dir in my_dirs:
if my_dir.startswith(arg_mask):
my_full_dir = "%s%s" % (
PathUtils.include_trailing_path_delimiter(
my_output_base
),
my_dir,
)
Msg.info("Removing: %s" % (my_full_dir))
PathUtils.rmdir(my_full_dir, True)
            else:
                Msg.info(
                    "Checking for Expired Directories: %s" % (my_output_base)
                )
                my_expiredate = DateTime.DateDelta(int(arg_expire))
                PathUtils.expire(my_output_base, my_expiredate)
except Exception as ex:
Msg.error_trace()
Msg.err(str(ex))
finally:
Msg.info("Operation Complete, Restart Master Run to continue ...")
sys.exit(1)
def run_mode(self):
return self.mode
def sig_interrupt_handler(a_signal, frame):
# Allows us to catch SIGINTs and inform child threads to quit at next
# chance. Since the child threads are daemons, this should not be needed,
# but we can remove | |
as shaded regions.
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : iter of int
The list of hyperparameter settings indices to plot
smooth_over : int
The number of previous data points to smooth over. Note that this
is *not* the number of timesteps to smooth over, but rather the number
of data points to smooth over. For example, if you save the return
every 1,000 timesteps, then setting this value to 15 will smooth
over the last 15 readings, or 15,000 timesteps.
fig : plt.figure
The figure to plot on, by default None. If None, creates a new figure
ax : plt.Axes
        The axis to plot on, by default None. If None, creates a new axis
figsize : tuple(int, int)
The size of the figure to plot
name : str, optional
The name of the agent, used for the legend
last_ind : int, optional
The index of the last element to plot in the returns list,
by default -1. This is useful if you want to plot many things on the
same axis, but all of which have a different number of elements. This
way, we can plot the first last_ind elements of each returns for each
agent.
xlim : float, optional
The x limit for the plot, by default None
ylim : float, optional
The y limit for the plot, by default None
alpha : float, optional
The alpha channel for the plot, by default 0.1
colours : list of str
The colours to use for each plot of each hyperparameter setting
env_type : str, optional
The type of environment, one of 'continuing', 'episodic'. By default
'continuing'
significance : float, optional
The significance level for the confidence interval, by default 0.01
Returns
-------
plt.figure, plt.Axes
The figure and axes of the plot
Raises
------
ValueError
When an axis is passed but no figure is passed
When an appropriate number of colours is not specified to cover all
hyperparameter settings
"""
if colours is not None and len(colours) != len(ind):
raise ValueError("must have one colour for each hyperparameter " +
"setting")
if colours is None:
colours = _get_default_colours(ind)
# Set up figure
# if ax is None and fig is None:
# fig = plt.figure(figsize=figsize)
# ax = fig.add_subplot()
# if xlim is not None:
# ax.set_xlim(xlim)
# if ylim is not None:
# ax.set_ylim(ylim)
conf_level = "{:.2f}".format(1-significance)
title = f"""Average {type_.title()} Return per Run with {conf_level}
Confidence Intervals"""
fig, ax = _setup_fig(fig, ax, figsize, title, xlim, ylim, xlabel, ylabel)
# Plot with bootstrap confidence interval
for i in range(len(ind)):
data = exp.reduce_episodes(data, ind[i], type_=type_)
_, mean, conf = exp.get_mean_err(data, type_, ind[i], smooth_over,
exp.bootstrap_conf,
err_args={
"significance": significance,
},
keep_shape=keep_shape)
mean = mean[:last_ind]
conf = conf[:, :last_ind]
episodes = np.arange(mean.shape[0])
# Plot based on colours
label = name
print(mean.shape, conf.shape, episodes.shape)
_plot_shaded(ax, episodes, mean, conf, colours[i], label, alpha)
ax.legend()
conf_level = "{:.2f}".format(1-significance)
ax.set_title(f"""Average {type_.title()} Return per Run with {conf_level}
Confidence Intervals""")
# ax.set_ylabel(ylabel)
# ax.set_xlabel(xlabel)
fig.show()
return fig, ax
@deprecated(reason="""Use exp.get_mean_err with exp.bootstrap_conf""")
def get_mean_bootstrap_conf(data, type_, ind, smooth_over,
env_type="continuing", significance=0.01,
keep_shape=False):
"""
Gets the mean return per episode along with the bootstrap confidence
interval on an episode-by-episode basis over all runs.
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : int
The hyperparameter setting to plot
smooth_over : int
The number of previous returns to smooth over. If less than 2, then
no smoothing is performed
env_type : str, optional
The type of environment, one of 'continuing', 'episodic'. By default
'continuing'
significance : float, optional
The significance level for the confidence interval, by default 0.01
Returns
-------
3-tuple of array_like
The timesteps at each return to plot, the mean return to plot, and
the confidence interval for each episode to plot as a shaded region
"""
timesteps = None # So the linter doesn't have a temper tantrum
# Determine the timesteps to plot at
if type_ == "eval":
timesteps = \
data["experiment_data"][ind]["runs"][0]["timesteps_at_eval"]
elif type_ == "train":
timesteps_per_ep = \
data["experiment_data"][ind]["runs"][0]["train_episode_steps"]
timesteps = exp.get_cumulative_timesteps(timesteps_per_ep)
# Get the mean over all episodes per evaluation step (for online
# returns, this axis will have length 1 so we squeeze it)
returns = exp.get_returns(data, type_, ind, env_type=env_type)
returns = returns.mean(axis=-1)
# Smooth over returns
returns = exp.smooth(returns, smooth_over, keep_shape=keep_shape)
# Get the standard deviation of mean episodes per evaluation
# step over all runs
conf = exp.bootstrap_conf(returns, significance)
# Get the mean over all runs
mean = returns.mean(axis=0)
# If we do not keep the shape, then due to smoothing we lose some
# episodes
if not keep_shape:
end = len(timesteps) - smooth_over + 1
timesteps = timesteps[:end]
return timesteps, mean, conf
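# Illustrative sketch of the deprecated path above (the data dictionary,
# hyperparameter index, and smoothing window are hypothetical):
#
#   timesteps, mean, conf = get_mean_bootstrap_conf(data, "train", ind=0,
#                                                   smooth_over=5)
#
# which roughly mirrors exp.get_mean_err(data, "train", 0, 5,
# exp.bootstrap_conf, ...) and yields the x-values, the run-averaged return,
# and the confidence band to shade.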
def plot_mean_with_runs(data, type_, ind, smooth_over, names, colours=None,
figsize=(12, 6), xlim=None, ylim=None, alpha=0.2,
plot_avg=True, env_type="continuing",
keep_shape=False, fig=None, ax=None, detrend=None):
"""
Plots the mean return over all runs and the return for each run for a list
of data dictionaries and hyperparameter indices
Plots both the mean return per episode (over runs) as well as the return
for each individual run (including "mini-runs", if a set of concurrent
episodes were run for all runs, e.g. multiple evaluation episodes per
run at set intervals)
Note that this function takes in a list of data dictionaries and will
plot the runs for each ind (which is a list of lists, where each super-list
refers to a data dictionary and each sub-list refers to the indices for
that data dictionary to plot).
Example
-------
plot_mean_with_runs([sac_data, linear_data], "train", [[3439], [38, 3]],
smooth_over=[5, 2], names=["SAC", "LAC"], figsize=(12, 6), alpha=0.2,
plot_avg=True, env_type="episodic")
will plot hyperparameter index 3439 for the sac_data, smoothing over the
last 5 episodes, and the label will have the term "SAC" in it; also plots
the mean and each individual run on the linear_data for hyperparameter
settings 38 and 3, smoothing over the last 2 episodes for each and with
the term "LAC" in the labels.
Parameters
----------
data : list of dict
The Python data dictionaries generated from running main.py for the
agents
type_ : str
Which type of data to plot, one of "eval" or "train"
ind : iter of iter of int
The list of lists of hyperparameter settings indices to plot for
each agent. For example [[1, 2], [3, 4]] means that the first agent
plots will use hyperparameter settings indices 1 and 2, while the
second will use 3 and 4.
smooth_over : list of int
The number of previous data points to smooth over for the agent's
plot for each data dictionary. Note that this is *not* the number of
timesteps to smooth over, but rather the number of data points to
smooth over. For example, if you save the return every 1,000
timesteps, then setting this value to 15 will smooth over the last
15 readings, or 15,000 timesteps. For example, [1, 2] will mean that
the plots using the first data dictionary will smooth over the past 1
        data points, while the second will smooth over the past 2 data
points for each hyperparameter setting.
figsize : tuple(int, int)
The size of the figure to plot
names : list of str
The name of the agents, used for the legend
colours : list of list of str, optional
The colours to use for each hyperparameter settings plot for each data
dictionary, by default None
xlim : float, optional
The x limit for the plot, by default None
ylim : float, optional
The y limit for the plot, by default None
alpha : float, optional
        The alpha to use for plots of the runs, by default 0.2
plot_avg : bool, optional
If | |
"""Test suite for faceted module"""
from collections import OrderedDict
from itertools import product
import matplotlib.axes
import matplotlib.figure
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ..faceted import (
_DEFAULT_ASPECT,
_DEFAULT_WIDTH,
faceted,
faceted_ax,
_infer_constraints,
_infer_grid_class,
HeightConstrainedAxesGrid,
HeightAndWidthConstrainedAxesGrid,
WidthConstrainedAxesGrid,
)
plt.switch_backend("agg")
_TOP_PAD = _BOTTOM_PAD = _LEFT_PAD = _RIGHT_PAD = 0.25
_HORIZONTAL_INTERNAL_PAD = 0.25
_VERTICAL_INTERNAL_PAD = 0.5
_INTERNAL_PAD = (_HORIZONTAL_INTERNAL_PAD, _VERTICAL_INTERNAL_PAD)
_ASPECT_CONSTRAINT = 0.5
_HEIGHT_CONSTRAINT = 7.0
_WIDTH_CONSTRAINT = 8.0
_SHORT_SIDE_PAD = 0.25
_LONG_SIDE_PAD = 0.25
_CBAR_THICKNESS = 0.125
def get_bounds(fig, ax):
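    # Bounding box of ``ax`` expressed in figure-fraction coordinates,
    # returned as (x0, y0, width, height) for comparison against the
    # layouts expected by the tests below.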
return ax.bbox.transformed(fig.transFigure.inverted()).bounds
def test_faceted_cbar_mode_none():
fig, axes = faceted(1, 2, width=_WIDTH_CONSTRAINT, aspect=_ASPECT_CONSTRAINT)
assert len(axes) == 2
plt.close(fig)
def test_faceted_cbar_mode_single():
fig, axes, cax = faceted(
1, 2, width=_WIDTH_CONSTRAINT, aspect=_ASPECT_CONSTRAINT, cbar_mode="single"
)
assert len(axes) == 2
plt.close(fig)
def test_faceted_cbar_mode_each():
fig, axes, caxes = faceted(
1, 2, width=_WIDTH_CONSTRAINT, aspect=_ASPECT_CONSTRAINT, cbar_mode="each"
)
assert len(axes) == 2
assert len(axes) == len(caxes)
plt.close(fig)
@pytest.mark.parametrize(
("width", "height", "aspect"), [(1, 1, None), (1, None, 1), (None, 1, 1)]
)
def test_faceted_cbar_mode_invalid(width, height, aspect):
with pytest.raises(ValueError):
faceted(1, 2, width=width, height=height, aspect=aspect, cbar_mode="invalid")
def test_faceted_invalid_internal_pad():
with pytest.raises(ValueError):
faceted(
1,
2,
width=_WIDTH_CONSTRAINT,
aspect=_ASPECT_CONSTRAINT,
internal_pad=(1, 2, 3),
)
@pytest.mark.parametrize(
("inputs", "expected"),
[
((None, None, None), (_DEFAULT_WIDTH, None, _DEFAULT_ASPECT)),
((3.0, None, None), (3.0, None, _DEFAULT_ASPECT)),
((None, 3.0, None), (None, 3.0, _DEFAULT_ASPECT)),
((None, None, 3.0), (_DEFAULT_WIDTH, None, 3.0)),
((3.0, 3.0, None), (3.0, 3.0, None)),
((None, 3.0, 3.0), (None, 3.0, 3.0)),
((3.0, None, 3.0), (3.0, None, 3.0)),
((3.0, 3.0, 3.0), ValueError),
],
ids=lambda x: str(x),
)
def test__infer_constraints(inputs, expected):
if not isinstance(expected, tuple) and issubclass(expected, Exception):
with pytest.raises(expected):
_infer_constraints(*inputs)
else:
result = _infer_constraints(*inputs)
assert result == expected
@pytest.mark.parametrize(
("width", "height", "aspect", "expected"),
[
(5.0, 5.0, None, HeightAndWidthConstrainedAxesGrid),
(5.0, None, 5.0, WidthConstrainedAxesGrid),
(None, 5.0, 5.0, HeightConstrainedAxesGrid),
],
)
def test__infer_grid_class(width, height, aspect, expected):
result = _infer_grid_class(width, height, aspect)
assert result == expected
_LAYOUTS = [(1, 1), (1, 2), (2, 1), (2, 2), (5, 3)]
_CBAR_MODES = [None, "single", "each", "edge"]
_CBAR_LOCATIONS = ["bottom", "right", "top", "left"]
_CONSTRAINTS = ["height-and-aspect", "width-and-aspect", "height-and-width"]
_CG_LAYOUTS = product(_CBAR_MODES, _CBAR_LOCATIONS, _LAYOUTS, _CONSTRAINTS)
def format_layout(layout):
cbar_mode, cbar_loc, (rows, cols), constraint = layout
return "cbar_mode={!r}, cbar_location={!r}, rows={}, cols={}, constraint={}".format(
cbar_mode, cbar_loc, rows, cols, constraint
)
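# For example, format_layout(("single", "right", (2, 2), "width-and-aspect"))
# produces "cbar_mode='single', cbar_location='right', rows=2, cols=2,
# constraint=width-and-aspect", which becomes the parametrized test id below.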
_CG_IDS = OrderedDict([(layout, format_layout(layout)) for layout in _CG_LAYOUTS])
@pytest.fixture(params=_CG_IDS.keys(), ids=_CG_IDS.values())
def grid(request):
mode, location, (rows, cols), constraint = request.param
if constraint == "width-and-aspect":
obj = WidthConstrainedAxesGrid(
rows,
cols,
width=_WIDTH_CONSTRAINT,
aspect=_ASPECT_CONSTRAINT,
top_pad=_TOP_PAD,
bottom_pad=_BOTTOM_PAD,
left_pad=_LEFT_PAD,
right_pad=_RIGHT_PAD,
cbar_mode=mode,
cbar_pad=_LONG_SIDE_PAD,
axes_pad=_INTERNAL_PAD,
cbar_location=location,
cbar_size=_CBAR_THICKNESS,
cbar_short_side_pad=_SHORT_SIDE_PAD,
)
elif constraint == "height-and-aspect":
obj = HeightConstrainedAxesGrid(
rows,
cols,
height=_HEIGHT_CONSTRAINT,
aspect=_ASPECT_CONSTRAINT,
top_pad=_TOP_PAD,
bottom_pad=_BOTTOM_PAD,
left_pad=_LEFT_PAD,
right_pad=_RIGHT_PAD,
cbar_mode=mode,
cbar_pad=_LONG_SIDE_PAD,
axes_pad=_INTERNAL_PAD,
cbar_location=location,
cbar_size=_CBAR_THICKNESS,
cbar_short_side_pad=_SHORT_SIDE_PAD,
)
elif constraint == "height-and-width":
obj = HeightAndWidthConstrainedAxesGrid(
rows,
cols,
height=_HEIGHT_CONSTRAINT,
width=_WIDTH_CONSTRAINT,
top_pad=_TOP_PAD,
bottom_pad=_BOTTOM_PAD,
left_pad=_LEFT_PAD,
right_pad=_RIGHT_PAD,
cbar_mode=mode,
cbar_pad=_LONG_SIDE_PAD,
axes_pad=_INTERNAL_PAD,
cbar_location=location,
cbar_size=_CBAR_THICKNESS,
cbar_short_side_pad=_SHORT_SIDE_PAD,
)
else:
raise NotImplementedError()
yield obj
plt.close(obj.fig)
def get_tile_width(grid, left_pad=_LEFT_PAD, right_pad=_RIGHT_PAD):
return (
grid.width - left_pad - right_pad - (grid.cols - 1) * _HORIZONTAL_INTERNAL_PAD
) / grid.cols
def get_tile_height(grid, bottom_pad=_BOTTOM_PAD, top_pad=_TOP_PAD):
return (
grid.height - bottom_pad - top_pad - (grid.rows - 1) * _VERTICAL_INTERNAL_PAD
) / grid.rows
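# Worked example of the tile arithmetic (illustrative numbers): with the
# 8.0 in width constraint, 2 columns, 0.25 in outer pads and a 0.25 in
# horizontal internal pad, each tile is (8.0 - 0.25 - 0.25 - 1 * 0.25) / 2
# = 3.625 in wide; tile heights follow the same pattern vertically.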
def test_constrained_axes_positions(grid):
if grid.cbar_mode == "each":
check_constrained_axes_positions_each(grid)
elif grid.cbar_mode == "single":
check_constrained_axes_positions_single(grid)
elif grid.cbar_mode == "edge":
check_constrained_axes_positions_edge(grid)
elif grid.cbar_mode is None:
check_constrained_axes_positions_none(grid)
def test_constrained_caxes_positions(grid):
if grid.cbar_mode == "each":
check_constrained_caxes_positions_each(grid)
elif grid.cbar_mode == "single":
check_constrained_caxes_positions_single(grid)
elif grid.cbar_mode == "edge":
check_constrained_caxes_positions_edge(grid)
elif grid.cbar_mode is None:
pytest.skip("Skipping colorbar positions test, because cbar_mode=None")
def test_plot_aspect(grid):
fig = grid.fig
width, height = fig.get_size_inches()
for ax in grid.axes:
ax_bounds = get_bounds(fig, ax)
_, _, _plot_width, _plot_height = ax_bounds
plot_width = _plot_width * width
plot_height = _plot_height * height
expected = grid.aspect
result = plot_height / plot_width
np.testing.assert_allclose(result, expected)
def check_constrained_axes_positions_none(grid):
rows, cols = grid.rows, grid.cols
width, height = grid.width, grid.height
tile_width, tile_height = get_tile_width(grid), get_tile_height(grid)
fig = grid.fig
indexes = list(product(range(rows - 1, -1, -1), range(cols)))
for ax, (row, col) in zip(grid.axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (_LEFT_PAD + col * (_HORIZONTAL_INTERNAL_PAD + tile_width)) / width
y0 = (_BOTTOM_PAD + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = tile_width / width
dy = tile_height / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
def check_constrained_axes_positions_single(grid):
rows, cols = grid.rows, grid.cols
width, height = grid.width, grid.height
cbar_location = grid.cbar_location
fig = grid.fig
left_pad, right_pad = _LEFT_PAD, _RIGHT_PAD
bottom_pad, top_pad = _BOTTOM_PAD, _TOP_PAD
if cbar_location == "left":
left_pad = _LEFT_PAD + _CBAR_THICKNESS + _LONG_SIDE_PAD
elif cbar_location == "right":
right_pad = _RIGHT_PAD + _CBAR_THICKNESS + _LONG_SIDE_PAD
elif cbar_location == "bottom":
bottom_pad = _BOTTOM_PAD + _CBAR_THICKNESS + _LONG_SIDE_PAD
elif cbar_location == "top":
top_pad = _TOP_PAD + _CBAR_THICKNESS + _LONG_SIDE_PAD
tile_width = get_tile_width(grid, left_pad=left_pad, right_pad=right_pad)
tile_height = get_tile_height(grid, bottom_pad=bottom_pad, top_pad=top_pad)
indexes = list(product(range(rows - 1, -1, -1), range(cols)))
axes = grid.axes
for ax, (row, col) in zip(axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (left_pad + col * (_HORIZONTAL_INTERNAL_PAD + tile_width)) / width
y0 = (bottom_pad + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = tile_width / width
dy = tile_height / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
def check_constrained_caxes_positions_single(grid):
width, height = grid.width, grid.height
cbar_location = grid.cbar_location
fig = grid.fig
cax = grid.caxes
cax_bounds = get_bounds(fig, cax)
if cbar_location == "bottom":
x0 = (_LEFT_PAD + _SHORT_SIDE_PAD) / width
y0 = _BOTTOM_PAD / height
dx = (width - _LEFT_PAD - _RIGHT_PAD - 2.0 * _SHORT_SIDE_PAD) / width
dy = _CBAR_THICKNESS / height
elif cbar_location == "right":
x0 = (width - _CBAR_THICKNESS - _RIGHT_PAD) / width
y0 = (_BOTTOM_PAD + _SHORT_SIDE_PAD) / height
dx = _CBAR_THICKNESS / width
dy = (height - _TOP_PAD - _BOTTOM_PAD - 2.0 * _SHORT_SIDE_PAD) / height
elif cbar_location == "top":
x0 = (_LEFT_PAD + _SHORT_SIDE_PAD) / width
y0 = (height - _CBAR_THICKNESS - _TOP_PAD) / height
dx = (width - _LEFT_PAD - _RIGHT_PAD - 2.0 * _SHORT_SIDE_PAD) / width
dy = _CBAR_THICKNESS / height
elif cbar_location == "left":
x0 = _LEFT_PAD / width
y0 = (_BOTTOM_PAD + _SHORT_SIDE_PAD) / height
dx = _CBAR_THICKNESS / width
dy = (height - _TOP_PAD - _BOTTOM_PAD - 2.0 * _SHORT_SIDE_PAD) / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(cax_bounds, expected_bounds)
def check_constrained_axes_positions_each(grid):
rows, cols = grid.rows, grid.cols
width, height = grid.width, grid.height
tile_width, tile_height = get_tile_width(grid), get_tile_height(grid)
cbar_location = grid.cbar_location
fig = grid.fig
indexes = list(product(range(rows - 1, -1, -1), range(cols)))
axes = grid.axes
if cbar_location == "bottom":
for ax, (row, col) in zip(axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (_LEFT_PAD + col * (tile_width + _HORIZONTAL_INTERNAL_PAD)) / width
y0 = (
_BOTTOM_PAD
+ _CBAR_THICKNESS
+ _LONG_SIDE_PAD
+ row * (tile_height + _VERTICAL_INTERNAL_PAD)
) / height
dx = tile_width / width
dy = (tile_height - _CBAR_THICKNESS - _LONG_SIDE_PAD) / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
elif cbar_location == "top":
for ax, (row, col) in zip(axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (_LEFT_PAD + col * (_HORIZONTAL_INTERNAL_PAD + tile_width)) / width
y0 = (_BOTTOM_PAD + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = tile_width / width
dy = (tile_height - _CBAR_THICKNESS - _LONG_SIDE_PAD) / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
elif cbar_location == "right":
for ax, (row, col) in zip(axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (_LEFT_PAD + col * (_HORIZONTAL_INTERNAL_PAD + tile_width)) / width
y0 = (_BOTTOM_PAD + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = (tile_width - _CBAR_THICKNESS - _LONG_SIDE_PAD) / width
dy = tile_height / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
elif cbar_location == "left":
for ax, (row, col) in zip(axes, indexes):
ax_bounds = get_bounds(fig, ax)
x0 = (
_LEFT_PAD
+ _CBAR_THICKNESS
+ _LONG_SIDE_PAD
+ col * (_HORIZONTAL_INTERNAL_PAD + tile_width)
) / width
y0 = (_BOTTOM_PAD + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = (tile_width - _CBAR_THICKNESS - _LONG_SIDE_PAD) / width
dy = tile_height / height
expected_bounds = [x0, y0, dx, dy]
np.testing.assert_allclose(ax_bounds, expected_bounds)
def check_constrained_caxes_positions_each(grid):
rows, cols = grid.rows, grid.cols
width, height = grid.width, grid.height
tile_width, tile_height = get_tile_width(grid), get_tile_height(grid)
cbar_location = grid.cbar_location
fig = grid.fig
indexes = list(product(range(rows - 1, -1, -1), range(cols)))
caxes = grid.caxes
if cbar_location == "bottom":
for cax, (row, col) in zip(caxes, indexes):
cax_bounds = get_bounds(fig, cax)
x0 = (
_LEFT_PAD
+ col * (_HORIZONTAL_INTERNAL_PAD + tile_width)
+ _SHORT_SIDE_PAD
) / width
y0 = (_BOTTOM_PAD + row * (_VERTICAL_INTERNAL_PAD + tile_height)) / height
dx = (tile_width - 2.0 * _SHORT_SIDE_PAD) / width
dy = _CBAR_THICKNESS / height
expected_bounds | |
#!/usr/bin/env python
from __future__ import print_function
import cx_Oracle
import datetime
import calendar
import sys
import logging
import CondCore.Utilities.conddb_serialization_metadata as sm
import CondCore.Utilities.credentials as auth
import CondCore.Utilities.conddb_time as conddb_time
import os
authPathEnvVar = 'COND_AUTH_PATH'
prod_db_service = ('cms_orcon_prod',{'w':'cms_orcon_prod/cms_cond_general_w','r':'cms_orcon_prod/cms_cond_general_r'})
adg_db_service = ('cms_orcon_adg',{'r':'cms_orcon_adg/cms_cond_general_r'})
dev_db_service = ('cms_orcoff_prep',{'w':'cms_orcoff_prep/cms_cond_general_w','r':'cms_orcoff_prep/cms_cond_general_r'})
schema_name = 'CMS_CONDITIONS'
fmt_str = "[%(asctime)s] %(levelname)s: %(message)s"
logLevel = logging.INFO
logFormatter = logging.Formatter(fmt_str)
def print_table( headers, table ):
ws = []
for h in headers:
ws.append(len(h))
for row in table:
ind = 0
for c in row:
c = str(c)
if ind<len(ws):
if len(c)> ws[ind]:
ws[ind] = len(c)
ind += 1
def printf( row ):
line = ''
ind = 0
for w in ws:
fmt = '{:<%s}' %w
if ind<len(ws):
line += (fmt.format( row[ind] )+' ')
ind += 1
print(line)
printf( headers )
hsep = ''
for w in ws:
fmt = '{:-<%s}' %w
hsep += (fmt.format('')+' ')
print(hsep)
for row in table:
printf( row )
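# Example with made-up rows: print_table(['tag', 'boost'], [['MyTag', '1.63']])
# prints the header line, a dashed separator sized to the widest cell in each
# column, and one aligned data row.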
class version_db(object):
def __init__(self, db ):
self.db = db
self.cmssw_boost_map = {}
self.boost_run_map = []
def fetch_cmssw_boost_map( self ):
cursor = self.db.cursor()
cursor.execute('SELECT BOOST_VERSION, CMSSW_VERSION FROM CMSSW_BOOST_MAP');
rows = cursor.fetchall()
self.cmssw_boost_map = {}
for r in rows:
self.cmssw_boost_map[r[1]]=r[0]
return self.cmssw_boost_map
def fetch_boost_run_map( self ):
cursor = self.db.cursor()
cursor.execute('SELECT RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME FROM BOOST_RUN_MAP ORDER BY RUN_NUMBER, INSERTION_TIME')
rows = cursor.fetchall()
self.boost_run_map = []
for r in rows:
self.boost_run_map.append( (r[0],r[1],r[2],str(r[3])) )
return self.boost_run_map
def insert_boost_run_range( self, run, boost_version, min_ts ):
cursor = self.db.cursor()
cursor.execute('SELECT MIN(RUN_NUMBER) FROM RUN_INFO WHERE RUN_NUMBER >= :RUN',(run,))
res = cursor.fetchone()
if res is not None and res[0] is not None:
min_run = res[0]
cursor.execute('SELECT START_TIME FROM RUN_INFO WHERE RUN_NUMBER=:RUN',(min_run,))
min_run_time = cursor.fetchone()[0]
min_run_ts = calendar.timegm( min_run_time.utctimetuple() ) << 32
else:
min_run = run
min_run_ts = conddb_time.string_to_timestamp(min_ts)
now = datetime.datetime.utcnow()
cursor.execute('INSERT INTO BOOST_RUN_MAP ( RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME ) VALUES (:RUN, :RUN_START_T, :BOOST, :TIME)',(run,min_run_ts,boost_version,now) )
def insert_cmssw_boost( self, cmssw_version,boost_version ):
cursor = self.db.cursor()
cursor.execute('INSERT INTO CMSSW_BOOST_MAP ( CMSSW_VERSION, BOOST_VERSION ) VALUES ( :CMSSW_VERSION, :BOOST_VERSION )',(cmssw_version,boost_version))
def lookup_boost_in_cmssw( self, cmssw_version ):
cmssw_v = sm.check_cmssw_version( cmssw_version )
the_arch = None
releaseRoot = None
if sm.is_release_cycle( cmssw_v ):
cmssw_v = sm.strip_cmssw_version( cmssw_v )
archs = sm.get_production_arch( cmssw_v )
for arch in archs:
path = sm.get_release_root( cmssw_v, arch )
if os.path.exists(os.path.join(path,cmssw_v)):
releaseRoot = path
the_arch = arch
break
if releaseRoot is None:
for arch in archs:
the_arch = arch
releaseRoot = sm.get_release_root( cmssw_v, arch )
for r in sorted (os.listdir( releaseRoot )):
if r.startswith(cmssw_v):
cmssw_v = r
logging.debug('Boost version will be verified in release %s' %cmssw_v)
if cmssw_v in self.cmssw_boost_map.keys():
return self.cmssw_boost_map[cmssw_v]
if releaseRoot is None:
archs = sm.get_production_arch( cmssw_v )
for arch in archs:
path = sm.get_release_root( cmssw_v, arch )
if os.path.exists(os.path.join(path,cmssw_v)):
releaseRoot = path
the_arch = arch
break
logging.debug('Release path: %s' %releaseRoot)
boost_version = sm.get_cmssw_boost( the_arch, '%s/%s' %(releaseRoot,cmssw_v) )
if not boost_version is None:
self.cmssw_boost_map[cmssw_v] = boost_version
self.insert_cmssw_boost( cmssw_v,boost_version )
return boost_version
def populate_for_gts( self ):
cursor = self.db.cursor()
cursor.execute('SELECT DISTINCT(RELEASE) FROM GLOBAL_TAG')
rows = cursor.fetchall()
for r in rows:
self.lookup_boost_in_cmssw( r[0] )
class conddb_tool(object):
def __init__( self ):
self.db = None
self.version_db = None
self.args = None
self.logger = logging.getLogger()
self.logger.setLevel(logLevel)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
self.logger.addHandler(consoleHandler)
self.iovs = None
self.versionIovs = None
def connect( self ):
if self.args.db is None:
self.args.db = 'pro'
if self.args.db == 'dev' or self.args.db == 'oradev' :
db_service = dev_db_service
elif self.args.db == 'orapro':
db_service = adg_db_service
        elif self.args.db == 'onlineorapro' or self.args.db == 'pro':
            db_service = prod_db_service
        else:
            raise Exception("Database '%s' is not known." % self.args.db)
if self.args.accessType not in db_service[1].keys():
raise Exception('The specified database connection %s does not support the requested action.' %db_service[0])
service = db_service[1][self.args.accessType]
creds = auth.get_credentials( authPathEnvVar, service, self.args.auth )
if creds is None:
raise Exception("Could not find credentials for service %s" %service)
(username, account, pwd) = creds
connStr = '%s/%s@%s' %(username,pwd,db_service[0])
self.db = cx_Oracle.connect(connStr)
logging.info('Connected to %s as user %s' %(db_service[0],username))
self.db.current_schema = schema_name
def process_tag_boost_version( self, t, timetype, tagBoostVersion, minIov, timeCut, validate ):
if self.iovs is None:
self.iovs = []
cursor = self.db.cursor()
stmt = 'SELECT IOV.SINCE SINCE, IOV.INSERTION_TIME INSERTION_TIME, P.STREAMER_INFO STREAMER_INFO FROM TAG, IOV, PAYLOAD P WHERE TAG.NAME = IOV.TAG_NAME AND P.HASH = IOV.PAYLOAD_HASH AND TAG.NAME = :TAG_NAME'
params = (t,)
if timeCut and tagBoostVersion is not None and not validate:
whereClauseOnSince = ' AND IOV.INSERTION_TIME>:TIME_CUT'
stmt = stmt + whereClauseOnSince
params = params + (timeCut,)
stmt = stmt + ' ORDER BY SINCE'
logging.debug('Executing: "%s"' %stmt)
cursor.execute(stmt,params)
for r in cursor:
streamer_info = str(r[2].read())
self.iovs.append((r[0],r[1],streamer_info))
niovs = 0
self.versionIovs = []
lastBoost = None
update = False
if tagBoostVersion is not None:
update = True
for iov in self.iovs:
if validate and timeCut is not None and timeCut < iov[1]:
continue
niovs += 1
iovBoostVersion, tagBoostVersion = sm.update_tag_boost_version( tagBoostVersion, minIov, iov[2], iov[0], timetype, self.version_db.boost_run_map )
if minIov is None or iov[0]<minIov:
minIov = iov[0]
logging.debug('iov: %s - inserted on %s - streamer: %s' %(iov[0],iov[1],iov[2]))
logging.debug('current tag boost version: %s minIov: %s' %(tagBoostVersion,minIov))
if lastBoost is None or lastBoost!=iovBoostVersion:
self.versionIovs.append((iov[0],iovBoostVersion))
lastBoost = iovBoostVersion
if tagBoostVersion is None:
if niovs == 0:
logging.warning( 'No iovs found. boost version cannot be determined.')
return None, None
else:
logging.error('Could not determine the tag boost version.' )
return None, None
else:
if niovs == 0:
logging.info('Tag boost version has not changed.')
else:
msg = 'Found tag boost version %s ( min iov: %s ) combining payloads from %s iovs' %(tagBoostVersion,minIov,niovs)
if timeCut is not None:
if update:
msg += ' (iov insertion time>%s)' %str(timeCut)
else:
msg += ' (iov insertion time<%s)' %str(timeCut)
logging.info( msg )
return tagBoostVersion, minIov
def validate_boost_version( self, t, timetype, tagBoostVersion ):
cursor = self.db.cursor()
cursor.execute('SELECT GT.NAME, GT.RELEASE, GT.SNAPSHOT_TIME FROM GLOBAL_TAG GT, GLOBAL_TAG_MAP GTM WHERE GT.NAME = GTM.GLOBAL_TAG_NAME AND GTM.TAG_NAME = :TAG_NAME',(t,))
rows = cursor.fetchall()
invalid_gts = []
ngt = 0
gts = []
for r in rows:
gts.append((r[0],r[1],r[2]))
if len(gts)>0:
logging.info('validating %s gts.' %len(gts))
boost_snapshot_map = {}
for gt in gts:
ngt += 1
logging.debug('Validating for GT %s (release %s)' %(gt[0],gt[1]))
gtCMSSWVersion = sm.check_cmssw_version( gt[1] )
gtBoostVersion = self.version_db.lookup_boost_in_cmssw( gtCMSSWVersion )
if sm.cmp_boost_version( gtBoostVersion, tagBoostVersion )<0:
logging.warning( 'The boost version computed from all the iovs in the tag (%s) is incompatible with the gt [%s] %s (consuming ver: %s, snapshot: %s)' %(tagBoostVersion,ngt,gt[0],gtBoostVersion,str(gt[2])))
if str(gt[2]) not in boost_snapshot_map.keys():
tagSnapshotBoostVersion = None
minIov = None
                    tagSnapshotBoostVersion, minIov = self.process_tag_boost_version(t, timetype, tagSnapshotBoostVersion, minIov, gt[2], True)
if tagSnapshotBoostVersion is not None:
boost_snapshot_map[str(gt[2])] = tagSnapshotBoostVersion
else:
continue
else:
tagSnapshotBoostVersion = boost_snapshot_map[str(gt[2])]
if sm.cmp_boost_version( gtBoostVersion, tagSnapshotBoostVersion )<0:
logging.error('The snapshot from tag used by gt %s (consuming ver: %s) has an incompatible combined boost version %s' %(gt[0],gtBoostVersion,tagSnapshotBoostVersion))
invalid_gts.append( ( gt[0], gtBoostVersion ) )
if len(invalid_gts)==0:
if ngt>0:
logging.info('boost version for the tag validated in %s referencing Gts' %(ngt))
else:
logging.info('No GT referencing this tag found.')
else:
logging.error( 'boost version for the tag is invalid.')
return invalid_gts
def update_tag_boost_version_in_db( self, t, tagBoostVersion, minIov, update ):
cursor = self.db.cursor()
now = datetime.datetime.utcnow()
if update:
cursor.execute('UPDATE TAG_METADATA SET MIN_SERIALIZATION_V=:BOOST_V, MIN_SINCE=:MIN_IOV, MODIFICATION_TIME=:NOW WHERE TAG_NAME = :NAME',( tagBoostVersion,minIov,now,t))
else:
cursor.execute('INSERT INTO TAG_METADATA ( TAG_NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME ) VALUES ( :NAME, :BOOST_V, :MIN_IOV, :NOW )',(t, tagBoostVersion,minIov,now))
logging.info('Minimum boost version for the tag updated.')
def update_tags( self ):
cursor = self.db.cursor()
self.version_db = version_db( self.db )
self.version_db.fetch_cmssw_boost_map()
self.version_db.fetch_boost_run_map()
tags = {}
wpars = ()
if self.args.name is not None:
stmt0 = 'SELECT NAME FROM TAG WHERE NAME = :TAG_NAME'
wpars = (self.args.name,)
cursor.execute(stmt0,wpars);
rows = cursor.fetchall()
found = False
for r in rows:
found = True
break
if not found:
                raise Exception('Tag %s does not exist in the database.' %self.args.name )
tags[self.args.name] = None
stmt1 = 'SELECT MIN_SERIALIZATION_V, MIN_SINCE, CAST(MODIFICATION_TIME AS TIMESTAMP(0)) FROM TAG_METADATA WHERE TAG_NAME = :NAME'
cursor.execute(stmt1,wpars);
rows = cursor.fetchall()
for r in rows:
tags[self.args.name] = (r[0],r[1],r[2])
else:
stmt0 = 'SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA) ORDER BY NAME'
nmax = 100
if self.args.max is not None:
nmax = self.args.max
if self.args.all:
nmax = | |
value/colors supported for now
if (def_val_dim == 1 or def_val_dim == 4) and default_value.count(None) < def_val_dim:
sampler_name = prefix + action.name + "_mat_node_anim"
if getIndex(samplers, sampler_name) == -1:
sampler = {}
interpolation = animateGetInterpolation(exportSettings, default_value)
sampler['interpolation'] = interpolation
if interpolation == 'CONVERSION_NEEDED':
sampler['interpolation'] = 'LINEAR'
def_val_data, in_tangent_data, out_tangent_data = animateDefaultValue(exportSettings,
default_value, interpolation)
keys = sorted(def_val_data.keys())
values = []
final_keys = []
key_offset = 0.0
if len(keys) > 0 and exportSettings['move_keyframes']:
key_offset = bpy.context.scene.frame_start / bpy.context.scene.render.fps
for key in keys:
if key - key_offset < 0.0:
continue
final_keys.append(key - key_offset)
if interpolation == 'CUBICSPLINE':
for i in range(0, def_val_dim):
values.append(in_tangent_data[key][i])
for i in range(0, def_val_dim):
values.append(def_val_data[key][i])
if interpolation == 'CUBICSPLINE':
for i in range(0, def_val_dim):
values.append(out_tangent_data[key][i])
componentType = "FLOAT"
count = len(final_keys)
type = "SCALAR"
input = gltf.generateAccessor(glTF, exportSettings['binary'],
final_keys, componentType, count, type, "")
sampler['input'] = input
componentType = "FLOAT"
count = len(values) // def_val_dim
if def_val_dim == 1:
type = "SCALAR"
else:
type = "VEC4"
output = gltf.generateAccessor(glTF, exportSettings['binary'],
values, componentType, count, type, "")
sampler['output'] = output
sampler['name'] = sampler_name
samplers.append(sampler)
if energy.count(None) < 1:
sampler_name = prefix + action.name + '_energy'
if getIndex(samplers, sampler_name) == -1:
sampler = {}
interpolation = animateGetInterpolation(exportSettings, energy)
sampler['interpolation'] = interpolation
if interpolation == 'CONVERSION_NEEDED':
sampler['interpolation'] = 'LINEAR'
energy_data, in_tangent_data, out_tangent_data = animateEnergy(exportSettings,
energy, interpolation)
keys = sorted(energy_data.keys())
values = []
final_keys = []
key_offset = 0.0
if len(keys) > 0 and exportSettings['move_keyframes']:
key_offset = bpy.context.scene.frame_start / bpy.context.scene.render.fps
for key in keys:
if key - key_offset < 0.0:
continue
final_keys.append(key - key_offset)
if interpolation == 'CUBICSPLINE':
values.append(in_tangent_data[key][0])
values.append(energy_data[key][0])
if interpolation == 'CUBICSPLINE':
values.append(out_tangent_data[key][0])
componentType = "FLOAT"
count = len(final_keys)
type = "SCALAR"
input = gltf.generateAccessor(glTF, exportSettings['binary'],
final_keys, componentType, count, type, "")
sampler['input'] = input
componentType = "FLOAT"
count = len(values)
type = "SCALAR"
output = gltf.generateAccessor(glTF, exportSettings['binary'],
values, componentType, count, type, "")
sampler['output'] = output
sampler['name'] = sampler_name
samplers.append(sampler)
processed_paths = []
# gather fcurves in data dict
for bl_fcurve in action.fcurves:
node_name = getNameInBrackets(bl_fcurve.data_path)
if node_name != None and not is_morph_data:
if (node_type == 'JOINT' or node_type == 'MAT_NODE') and used_node_name != node_name:
continue
elif node_type == 'NODE' or node_type == 'NODE_X_90':
continue
else:
prefix = node_name + "_"
postfix = "_" + node_name
data_path = getAnimParam(bl_fcurve.data_path)
if data_path == 'location':
path = 'translation'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_' + path
generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name + postfix, samplers, channels)
elif (data_path == 'rotation_axis_angle' or data_path == 'rotation_euler' or
data_path == 'rotation_quaternion'):
path = 'rotation'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_' + path
generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name + postfix, samplers, channels)
elif data_path == 'scale':
path = 'scale'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_' + path
generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name + postfix, samplers, channels)
elif data_path == 'value':
path = 'weights'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_' + path
generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name + postfix, samplers, channels)
elif data_path == 'default_value':
if def_val_dim == 1:
path = 'material.nodeValue["' + used_node_name + '"]'
else:
path = 'material.nodeRGB["' + used_node_name + '"]'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_mat_node_anim'
channel = generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name, samplers, channels)
if bl_mat_name != None:
channel['target']['extras'] = {
'material': gltf.getMaterialIndex(glTF, bl_mat_name)
}
elif data_path == 'energy':
path = 'intensity'
if path in processed_paths:
continue
processed_paths.append(path)
sampler_name = prefix + action.name + '_energy'
generateAnimChannel(glTF, bl_obj, sampler_name, path, bl_node_name, samplers, channels)
#
# Property: animations
#
def generateAnimations(operator, context, exportSettings, glTF):
"""
Generates the top level animations, channels and samplers entry.
"""
animations = []
channels = []
samplers = []
filtered_objects_with_dg = exportSettings['filtered_objects_with_dg']
bl_backup_action = {}
if exportSettings['bake_armature_actions']:
start = None
end = None
for current_bl_action in bpy.data.actions:
# filter out non-object actions
if current_bl_action.id_root != 'OBJECT':
continue
for current_bl_fcurve in current_bl_action.fcurves:
if current_bl_fcurve is None:
continue
if start == None:
start = current_bl_fcurve.range()[0]
else:
start = min(start, current_bl_fcurve.range()[0])
if end == None:
end = current_bl_fcurve.range()[1]
else:
end = max(end, current_bl_fcurve.range()[1])
if start is None or end is None or exportSettings['frame_range']:
start = bpy.context.scene.frame_start
end = bpy.context.scene.frame_end
for bl_obj in filtered_objects_with_dg:
if bl_obj.animation_data is not None:
bl_backup_action[bl_obj.name] = bl_obj.animation_data.action
if bl_obj.pose is None:
continue
obj_scene = getSceneByObject(bl_obj)
if obj_scene is not None:
prev_active_scene = bpy.context.scene
bpy.context.window.scene = obj_scene
setSelectedObject(bl_obj)
bpy.ops.nla.bake(frame_start=start, frame_end=end,
only_selected=False, visual_keying=True)
restoreSelectedObjects()
bpy.context.window.scene = prev_active_scene
for bl_obj in filtered_objects_with_dg:
if bl_obj.animation_data is None:
continue
bl_action = bl_obj.animation_data.action
if bl_action is None:
continue
generateAnimationsParameter(operator, context, exportSettings, glTF, bl_action,
channels, samplers, bl_obj, None, None, None, bl_obj.rotation_mode,
mathutils.Matrix.Identity(4), mathutils.Matrix.Identity(4), False)
if exportSettings['skins']:
if bl_obj.type == 'ARMATURE' and len(bl_obj.pose.bones) > 0:
# Precalculate joint animation data.
start = None
end = None
for current_bl_action in bpy.data.actions:
# filter out non-object actions
if current_bl_action.id_root != 'OBJECT':
continue
for current_bl_fcurve in current_bl_action.fcurves:
if current_bl_fcurve is None:
continue
if start == None:
start = current_bl_fcurve.range()[0]
else:
start = min(start, current_bl_fcurve.range()[0])
if end == None:
end = current_bl_fcurve.range()[1]
else:
end = max(end, current_bl_fcurve.range()[1])
if start is None or end is None:
start = bpy.context.scene.frame_start
end = bpy.context.scene.frame_end
for frame in range(int(start), int(end) + 1):
bpy.context.scene.frame_set(frame)
for bl_bone in bl_obj.pose.bones:
matrix_basis = bl_bone.matrix_basis
correction_matrix_local = bl_bone.bone.matrix_local.copy()
if bl_bone.parent is not None:
correction_matrix_local = bl_bone.parent.bone.matrix_local.inverted() @ correction_matrix_local
if not exportSettings['joint_cache'].get(bl_bone.name):
exportSettings['joint_cache'][bl_bone.name] = {}
if exportSettings['bake_armature_actions']:
matrix_basis = bl_obj.convert_space(pose_bone=bl_bone, matrix=bl_bone.matrix, from_space='POSE', to_space='LOCAL')
matrix = correction_matrix_local @ matrix_basis
tmp_location, tmp_rotation, tmp_scale = decomposeTransformSwizzle(matrix)
exportSettings['joint_cache'][bl_bone.name][float(frame)] = [tmp_location, tmp_rotation, tmp_scale]
for bl_bone in bl_obj.pose.bones:
matrix_basis = bl_bone.matrix_basis
correction_matrix_local = bl_bone.bone.matrix_local.copy()
if bl_bone.parent is not None:
correction_matrix_local = bl_bone.parent.bone.matrix_local.inverted() @ correction_matrix_local
if exportSettings['bake_armature_actions']:
matrix_basis = bl_obj.convert_space(pose_bone=bl_bone, matrix=bl_bone.matrix, from_space='POSE', to_space='LOCAL')
generateAnimationsParameter(operator, context, exportSettings, glTF,
bl_action, channels, samplers, bl_obj, bl_bone.name,
None, None, bl_bone.rotation_mode, correction_matrix_local,
matrix_basis, False)
# export morph targets animation data
processed_meshes = []
for bl_obj in filtered_objects_with_dg:
if bl_obj.type != 'MESH' or bl_obj.data is None:
continue
bl_mesh = bl_obj.data
if bl_mesh in processed_meshes:
continue
if bl_mesh.shape_keys is None or bl_mesh.shape_keys.animation_data is None:
continue
bl_action = bl_mesh.shape_keys.animation_data.action
if bl_action is None:
continue
generateAnimationsParameter(operator, context, exportSettings, glTF, bl_action,
channels, samplers, bl_obj, None, None, None, bl_obj.rotation_mode,
mathutils.Matrix.Identity(4), mathutils.Matrix.Identity(4), True)
processed_meshes.append(bl_mesh)
# export light animation
for bl_obj in filtered_objects_with_dg:
if bl_obj.type != 'LIGHT' or bl_obj.data is None:
continue
bl_light = bl_obj.data
if bl_light.animation_data is None:
continue
bl_action = bl_light.animation_data.action
if bl_action is None:
continue
generateAnimationsParameter(operator, context, exportSettings, glTF, bl_action,
channels, samplers, bl_obj, None, None, None, bl_obj.rotation_mode,
mathutils.Matrix.Identity(4), mathutils.Matrix.Identity(4), True)
# export material animation
for bl_obj in filtered_objects_with_dg:
# export morph targets animation data.
if bl_obj.type != 'MESH' or bl_obj.data is None:
continue
bl_mesh = bl_obj.data
for bl_mat in bl_mesh.materials:
if bl_mat == None:
continue
if bl_mat.node_tree == None or bl_mat.node_tree.animation_data == None:
continue
bl_action = bl_mat.node_tree.animation_data.action
if bl_action == None:
continue
correction_matrix_local = mathutils.Matrix.Identity(4)
matrix_basis = mathutils.Matrix.Identity(4)
node_names = [n.name for n in bl_mat.node_tree.nodes]
for name in node_names:
generateAnimationsParameter(operator, context, exportSettings, glTF,
bl_action, channels, samplers, bl_obj, None,
bl_mat.name, name, bl_obj.rotation_mode,
correction_matrix_local, matrix_basis, False)
if exportSettings['bake_armature_actions']:
for bl_obj in filtered_objects_with_dg:
if bl_backup_action.get(bl_obj.name) is not None:
bl_obj.animation_data.action = bl_backup_action[bl_obj.name]
if len(channels) > 0 or len(samplers) > 0:
# collect channel/samplers by node
anim_data = {}
for channel in channels:
bl_obj = channel['bl_obj']
name = bl_obj.name
# shallow copy (might be repetitions, need to find out why)
sampler = samplers[channel['sampler']].copy()
if not name in anim_data:
anim_data[name] = [[], [], None]
# fix sampler index in new array
channel['sampler'] = len(anim_data[name][1])
# sampler 'name' is used to gather the index. However, 'name' is
# no property of sampler and has to be removed.
del sampler['name']
anim_data[name][0].append(channel)
anim_data[name][1].append(sampler)
anim_data[name][2] = bl_obj
del channel['bl_obj']
for name, data in anim_data.items():
animation = {
'name': name,
'channels' : data[0],
'samplers' : data[1]
}
v3dExt = gltf.appendExtension(glTF, 'S8S_v3d_animation_data', animation)
bl_obj = data[2]
v3dExt['auto'] = bl_obj.v3d.anim_auto
v3dExt['loop'] = bl_obj.v3d.anim_loop
v3dExt['repeatInfinite'] = bl_obj.v3d.anim_repeat_infinite
v3dExt['repeatCount'] = bl_obj.v3d.anim_repeat_count
# frame to sec
v3dExt['offset'] = animateConvertKeys([bl_obj.v3d.anim_offset])[0]
| |
from .geometry import near, isnear, orient2d, inrng, slide
import numpy as np
import math
class vec3:
@classmethod
def O(cls):
return cls(0, 0, 0)
@classmethod
def U(cls, r=1):
return cls(r, r, r)
@classmethod
def X(cls, r=1):
return cls(r, 0, 0)
@classmethod
def Y(cls, r=1):
return cls(0, r, 0)
@classmethod
def Z(cls, r=1):
return cls(0, 0, r)
@classmethod
def nX(cls, r=1):
return cls(-r, 0, 0)
@classmethod
def nY(cls, r=1):
return cls(0, -r, 0)
@classmethod
def nZ(cls, r=1):
return cls(0, 0, -r)
@classmethod
def com(cls, pts):
xs, ys, zs = zip(*[(p.x, p.y, p.z) for p in pts])
return cls(sum(xs) / len(xs), sum(ys) / len(ys), sum(zs) / len(zs))
def isnan(self):
return math.isnan(self.x) or math.isnan(self.y) or math.isnan(self.z)
def isO(self):
return self.x == 0 and self.y == 0 and self.z == 0
def xy(self):
return vec3(self.x, self.y, 0)
def yz(self):
return vec3(0, self.y, self.z)
def zx(self):
return vec3(self.x, 0, self.z)
def cp(self):
return vec3(self.x, self.y, self.z)
def fp(self):
return vec3(-self.x, -self.y, -self.z)
def inv(self):
return vec3(1 / self.x, 1 / self.y, 1 / self.z)
@classmethod
def sum(cls, pts):
r = vec3.O()
for p in pts:
r.trn(p)
return r
def __add__(self, o):
if isinstance(o, vec3):
return vec3(self.x + o.x, self.y + o.y, self.z + o.z)
else:
return vec3(self.x + o, self.y + o, self.z + o)
def __sub__(self, o):
if isinstance(o, vec3):
return vec3(self.x - o.x, self.y - o.y, self.z - o.z)
else:
return vec3(self.x - o, self.y - o, self.z - o)
def __mul__(self, o):
if isinstance(o, vec3):
return vec3(self.x * o.x, self.y * o.y, self.z * o.z)
else:
return vec3(self.x * o, self.y * o, self.z * o)
def __iter__(self):
yield self.x
yield self.y
yield self.z
def scl(self, o):
if isinstance(o, vec3):
return self.set(self.x * o.x, self.y * o.y, self.z * o.z)
else:
return self.set(self.x * o, self.y * o, self.z * o)
def sclps(self, os):
for o in os:
o.set(self.x * o.x, self.y * o.y, self.z * o.z)
return os
    def quant(self, n=4):
        # round each component to n decimal places
        return vec3(round(self.x, n), round(self.y, n), round(self.z, n))
def __repr__(self):
return 'vec3({:.4f}, {:.4f}, {:.4f})'.format(self.x, self.y, self.z)
def __init__(self, x, y, z):
self.set(x, y, z)
def setto(self, o):
return self.set(o.x, o.y, o.z)
def set(self, x, y, z):
self.x, self.y, self.z = x, y, z
return self
def near(self, o, e=0.00001):
"""Snap to other if sufficiently near."""
raise NotImplementedError
# TODO: this will snap for any axis despite the others...
self.set(near(self.x, o.x, e),
near(self.y, o.y, e),
near(self.z, o.z, e))
def isnear(self, o, e=0.00001):
if isnear(self.x, o.x, e):
if isnear(self.y, o.y, e):
if isnear(self.z, o.z, e):
return True
return False
def ang(self, o):
cosa = (self.dot(o) / (self.mag() * o.mag()))
cosa = min(max(cosa, -1), 1)
return np.arccos(cosa)
def axy(self, o):
return self.xy().ang(o.xy())
def saxy(self, o):
a = (np.arctan2(self.x, self.y) - np.arctan2(o.x, o.y))
return a + 2 * np.pi if a < 0 else a
def d(self, o):
return (o - self).mag()
def dxy(self, o):
dx = o.x - self.x
dy = o.y - self.y
return np.sqrt(dx ** 2 + dy ** 2)
def dexy(self, u, v):
uv = (v - u)
c = self.dot(uv)
if c < u.dot(uv):
return self.dxy(u)
elif c > v.dot(uv):
return self.dxy(v)
else:
            # perpendicular distance to the segment's supporting line
            nm = uv.crs(vec3.Z()).nrm()
            return abs(self.dot(nm) - u.dot(nm))
def dlxy(self, loop):
return min([self.dexy(u, v) for u, v in slide(loop, 2)])
def tov(self, o):
return o - self
def rot(self, q):
x = self.x * (q.w ** 2 + q.x ** 2 - q.y ** 2 - q.z ** 2) +\
self.y * (2 * (q.x * q.y - q.w * q.z)) +\
self.z * (2 * (q.x * q.z + q.w * q.y))
y = self.x * (2 * (q.x * q.y + q.w * q.z)) +\
self.y * (q.w ** 2 - q.x ** 2 + q.y ** 2 - q.z ** 2) +\
self.z * (2 * (q.y * q.z - q.w * q.x))
z = self.x * (2 * (q.x * q.z - q.w * q.y)) +\
self.y * (2 * (q.y * q.z + q.w * q.x)) +\
self.z * (q.w ** 2 - q.x ** 2 - q.y ** 2 + q.z ** 2)
return self.set(x, y, z)
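    # Example (assuming quat.av(angle, axis) as used in ring() below):
    #   vec3.X().rot(quat.av(np.pi / 2, vec3.Z()))
    # is approximately vec3.Y(), i.e. a right-handed 90 degree rotation
    # about +Z maps +X onto +Y.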
def trn(self, o):
return self.set(self.x + o.x, self.y + o.y, self.z + o.z)
def xtrn(self, dx):
return self.set(self.x + dx, self.y, self.z)
def ytrn(self, dy):
return self.set(self.x, self.y + dy, self.z)
def ztrn(self, dz):
return self.set(self.x, self.y, self.z + dz)
def trnps(self, os, cp=False):
for o in os:
o.set(self.x + o.x, self.y + o.y, self.z + o.z)
return os
def trnpy(self, py):
return (self.trnps(py[0]), [self.trnps(h) for h in py[1]])
def crs(self, o):
return vec3(self.y * o.z - self.z * o.y,
self.z * o.x - self.x * o.z,
self.x * o.y - self.y * o.x)
def dot(self, o):
return self.x * o.x + self.y * o.y + self.z * o.z
def mag(self):
return np.sqrt(self.dot(self))
def nrm(self):
mag = self.mag()
if mag > 0:
return vec3(self.x / mag, self.y / mag, self.z / mag)
else:
return vec3.O()
def nrmd(self):
mag = self.mag()
if mag > 0:
return self.set(self.x / mag, self.y / mag, self.z / mag)
else:
return self
def lerp(self, o, d):
return self + (self.tov(o) * d)
def ring(self, r, n, inscribe=True):
from .quat import quat
alpha = np.pi * (2.0 / n)
sr = r if inscribe else r / np.cos(alpha / 2.0)
z = vec3(0, 0, 1)
loop = []
for x in range(n):
loop.append(vec3(sr, 0, 0).rot(quat.av(x * alpha - alpha / 2.0, z)))
return [p.trn(self) for p in loop]
def line(self, o, n, ends=False):
line = []
if ends:
line.append(self.cp())
for i in range(n):
t = (i + 1) / (n + 1)
line.append(self.lerp(o, t))
if ends:
line.append(o.cp())
return line
def spline(self, o, st, ot, n, alpha=0.5):
s = self.d(o) / n
st.scl(s)
ot.scl(s)
#n += 1
n -= 1
ps = [self, self + st, o + ot, o]
x, y, z = zip(*[(p.x, p.y, p.z) for p in ps])
t = np.cumsum([0] + [u.d(v) ** alpha for u, v in slide(ps, 2, 1)])
x = self.catmull(x, t, n)[1:-1]
y = self.catmull(y, t, n)[1:-1]
z = self.catmull(z, t, n)[1:-1]
return [vec3(x, y, z) for x, y, z in zip(x, y, z)]
def catmull(self, xs, t, n):
def coordinate(xs, t, k):
l01 = xs[0]*(t[1] - k)/(t[1] - t[0]) + xs[1]*(k - t[0])/(t[1] - t[0])
l12 = xs[1]*(t[2] - k)/(t[2] - t[1]) + xs[2]*(k - t[1])/(t[2] - t[1])
l23 = xs[2]*(t[3] - k)/(t[3] - t[2]) + xs[3]*(k - t[2])/(t[3] - t[2])
l012 = l01*(t[2] - k)/(t[2] - t[0]) + l12*(k - t[0])/(t[2] - t[0])
l123 = l12*(t[3] - k)/(t[3] - t[1]) + l23*(k - t[1])/(t[3] - t[1])
c12 = l012*(t[2] - k)/(t[2] - t[1]) + l123*(k - t[1])/(t[2] - t[1])
return c12
curve = [xs[0]]
for i in range(1, len(xs) - 2):
for j in range(n):
k = t[1] + (j / n) * (t[2] - t[1])
x = coordinate(xs[i - 1:i + 3], t[i - 1:i + 3], k)
curve.append(x)
curve.append(xs[-2])
curve.append(xs[-1])
return curve
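    # Sketch of spline()/catmull() usage (hypothetical values):
    #   pts = vec3.O().spline(vec3.X(10), vec3.Y(), vec3.Y(), n=8)
    # returns interpolated points on a centripetal (alpha=0.5) Catmull-Rom
    # curve between the origin and (10, 0, 0), shaped by the tangent
    # offsets st and ot.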
def fan(self, r, n, inscribe=True):
ring = self.ring(r, n, inscribe)
return [(self, ring[i - 1], ring[i]) for i in range(n)]
def insxy(self, u, v, e=0.00001):
return self.onsxy(u, v, ie=False, e=e)
'''
if isnear(orient2d(self, u, v, epsilon=e), 0):
uv = u.tov(v)
u_uv = u.dot(uv)
v_uv = v.dot(uv)
self_uv = self.dot(uv)
if v_uv < u_uv:
u_uv, v_uv = v_uv, u_uv
return u_uv <= self_uv and self_uv <= v_uv
else:
return False
'''
def onsxy(self, u, v, ie=False, e=0.00001):
perp = self.leftof(u, v, e)
if perp == 0:
t = (v - u).nrm()
a, b, c = u.dot(t), self.dot(t), v.dot(t)
return (ie and (a == b or b == c)) or inrng(b, a, c)
else:
return False
#e = gtl.epsilon_c
e = 0.00001
if orient2d(self, u, v) == 0:
tn = (v - u)
a, b, c = self.dot(tn), u.dot(tn), v.dot(tn)
try:
b, c =
# azsmart/defaults.py
"""
Contains default configuration. To override this just
set orca definitions in the calling notebook or script.
Defines data model for running azsmart simulation
w/ orca and urbansim.
Note: the actual loading of data tables from
h5 sources will be done via TableLoad template/configs.
"""
from __future__ import division, print_function
import os
import numpy as np
import pandas as pd
import orca
from azsmart.framework import *
from smartpy_core.wrangling import *
def load_defaults():
########################
# default settings
########################
# default version/scenario
# this is an idea I'm still working out
orca.add_injectable('sim_version', 'proj1819')
@orca.injectable()
def config_root(sim_version):
"""
Root directory for configuration files.
"""
curr_dir = os.path.dirname(__file__)
return curr_dir.replace(
'azsmart\\azsmart', 'azsmart\\configs\\{}'.format(sim_version))
# the assumed base year
orca.add_injectable('base_year', 2018)
# the assumed end year
orca.add_injectable('end_year', 2055)
@orca.injectable()
def year(base_year):
"""
The current year. This will be the base unless called within
the context of a run.
"""
if 'iter_var' in orca.list_injectables():
year = orca.get_injectable('iter_var')
if year is not None:
return year
return base_year
# Adding a couple of lists of MPAs for convenient use
orca.add_injectable(
'mag_mpas',
[
'AJ','FL','GR','MA','PC','QC','AV','BU','CA','CC','CH','EL',
'FM','FH','GB','GI','GL','GO','GU','LP','CO','ME','PA','PE',
'PH','SA','SC','SU','TE','TO','WI','YO'
])
orca.add_injectable(
'cag_mpas',
[
'AJ','FL','GR','MA','PC','QC','KE','MM','MR','SP','WK',
'AK','SN','CG','CL','EY','TC'
])
@orca.injectable()
def all_mpas(mag_mpas, cag_mpas):
mpas = list(set(mag_mpas + cag_mpas))
mpas.sort()
return mpas
##################################
# MSA level control totals
##################################
@orca.table(cache=True, cache_scope='forever')
def msa_hh_pop_controls(county_hh_pop_controls):
"""
Aggregates household pop controls to the msa.
"""
return county_hh_pop_controls.local.groupby(level=0).sum()
@orca.table(cache=True, cache_scope='forever')
def msa_seasonal_controls(county_seasonal_controls):
"""
Aggregates county seasonal controls to the MSA.
"""
return county_seasonal_controls.local.groupby(level=0).sum()
@orca.table(cache=True, cache_scope='forever')
def msa_gq_controls(county_gq_controls):
"""
Aggregates county gq controls to the MSA.
"""
return county_gq_controls.local.groupby(['year', 'gq_type']).sum().reset_index(level=1)
@orca.table(cache=True, cache_scope='forever')
def msa_transient_controls(county_transient_controls):
"""
Aggregates county transient controls to the MSA.
"""
return county_transient_controls.local.groupby(['year', 'transient_type']).sum()
@orca.table(cache=True, cache_scope='forever')
def msa_emp_controls(county_emp_controls):
"""
Aggregates employment to the MSA.
"""
return county_emp_controls.local.groupby(
['year', 'qcew_naics', 'job_class'])['emp'].sum().reset_index(level=[1, 2])
#################
# parcels schema
#################
@orca.column('parcels')
def place_key(parcels, year):
"""
Year dependent combination of place and county.
"""
if year == 2017:
return parcels['county'] + '_' + parcels['city_17']
else:
return parcels['county'] + '_' + parcels['city']
@orca.column('parcels', cache=True, cache_scope='iteration')
def bldg_sqft(parcels, buildings):
"""
Total built square feet per parcel.
"""
b = buildings.to_frame(['parcel_id', 'total_sqft'])
return b.groupby('parcel_id')['total_sqft'].sum().reindex(parcels.index).fillna(0)
@orca.column('parcels', cache=True, cache_scope='iteration')
def bldg_far(parcels, buildings):
"""
Total build floor-area-ratio for the parcel.
"""
return fill_nulls(parcels['bldg_sqft'] / parcels['area'])
@orca.column('parcels', cache=True, cache_scope='iteration')
def posths_enrollment(parcels, posths):
"""
Post high school enrollment.
"""
p = posths.local
return p.groupby(
'parcel_id')['enrollment'].sum().reindex(parcels.index).fillna(0)
@orca.column('parcels', cache=True, cache_scope='iteration')
def k6_enrollment(parcels, k12):
"""
Kinder through 6th grade enrollment (elementary).
"""
k = k12.local
return k.groupby(
'parcel_id')['K6'].sum().reindex(parcels.index).fillna(0)
@orca.column('parcels', cache=True, cache_scope='iteration')
def g7_8_enrollment(parcels, k12):
"""
Enrollment for grades 7-8 (middle).
"""
k = k12.local
return k.groupby(
'parcel_id')['G7_8'].sum().reindex(parcels.index).fillna(0)
@orca.column('parcels', cache=True, cache_scope='iteration')
def g9_12_enrollment(parcels, k12):
"""
Enrollment for grades 9-12 (high school).
"""
k = k12.local
return k.groupby(
'parcel_id')['G9_12'].sum().reindex(parcels.index).fillna(0)
@orca.column('parcels', cache=True, cache_scope='iteration')
def k12_enrollment(parcels):
"""
Enrollment for grades kinder through 12th.
"""
return parcels['k6_enrollment'] + parcels['g7_8_enrollment'] + parcels['g9_12_enrollment']
@orca.column('parcels', cache=True, cache_scope='iteration')
def enrollment_all(parcels):
"""
Enrollment for all grades
"""
return parcels['k12_enrollment'] + parcels['posths_enrollment']
@orca.column('parcels', cache=True)
def is_MC(parcels):
"""
Dummy for Maricopa County.
"""
return (parcels.county == 'MC').astype(int)
@orca.column('parcels')
def is_PC(parcels):
"""
Dummy for Pinal County.
"""
return (parcels.county == 'PC').astype(int)
@orca.column('parcels', cache=True)
def is_tribe(parcels):
"""
Dummy for tribal areas.
"""
tribal_mpas = ['AK', 'FM', 'GR', 'SA', 'SN', 'TC']
return parcels['mpa'].isin(tribal_mpas).astype(int)
@orca.column('parcels', cache=True)
def east_valley(parcels):
"""
Dummy for presence in East Valley.
"""
in_ev = parcels['mpa'].isin([
'AJ', 'CA', 'CC', 'CH', 'FH', 'FM', 'GC', 'GI',
'GU', 'ME', 'PA', 'QC', 'SA', 'SC', 'TE'
])
return (parcels['is_MC'] & in_ev).astype(int)
@orca.column('parcels', cache=True)
def west_valley(parcels):
"""
Dummy for presence in West Valley.
"""
in_wv = parcels['mpa'].isin([
'AV', 'BU', 'EL', 'GB', 'GL', 'GO', 'LP', 'PE', 'SU', 'TO', 'WI', 'YO'
])
return (parcels['is_MC'] & in_wv).astype(int)
@orca.column('parcels', cache=True)
def mpa_sc(parcels):
"""
Dummy for presence in Scottsdale.
"""
return (parcels['mpa'] == 'SC').astype(int)
@orca.column('parcels', cache=True)
def mpa_ch(parcels):
"""
Dummy for presence in Chandler.
"""
return (parcels['mpa'] == 'CH').astype(int)
@orca.column('parcels', cache=True)
def mpa_ph(parcels):
"""
Dummy for presence in Phoenix.
"""
return (parcels['mpa'] == 'PH').astype(int)
@orca.column('parcels', cache=True)
def mpa_pa(parcels):
"""
Dummy for presence in Paradise Valley.
"""
return (parcels['mpa'] == 'PA').astype(int)
@orca.column('parcels')
def freeway_dist(year, parcels):
"""
Year dependent freeway distance.
"""
if year <= 2024:
return parcels['fwys_2019_dist']
elif year <= 2030:
return parcels['fwys_2030_dist']
else:
return parcels['fwys_2031_dist']
# make all parcel columns available
# Note: this is ugly, but right now I'm hard-coding these so we don't have to
# load the table first
parcel_broadcast_cols = [
'exlu_long_display_id',
'exlu_display_name',
'exlu_sector_name',
'exlu_dev_status',
'gp_mpa_lucode',
# 'gp_mag_lucode',
'dev_objectid',
'phx_devpolyid',
'redev_tpcid',
'city',
'county',
'county_fullname',
'mpa',
'mpa_fullname',
'maz',
'taz',
'raz',
'ewc_pinal',
'city_17',
'age_restricted',
'bg_geoid',
'section_id',
'hex_id',
'school_district_name',
'school_district_id',
'job_center_id',
'job_center_name',
'zcta_geoid',
'phx_village',
'phx_aoi',
'phx_lua_zone',
'MPO',
'x',
'y',
'area',
'lr_extensions_dist',
'fwys_2030_dist',
'bus_dist',
'rail_dist',
'airport_dist',
'fwys_2024_dist',
'fwys_2019_dist',
'cbd_dist',
'lr_dist',
'fwys_2031_dist',
'lr_stop_dist',
'fwys_2016_dist',
'place_key',
'is_MC',
'is_PC',
'is_tribe',
'east_valley',
'west_valley',
'mpa_sc',
'mpa_ch',
'mpa_ph',
'mpa_pa',
'freeway_dist'
]
for par_col in parcel_broadcast_cols:
for curr_tab in ['buildings', 'households', 'persons', 'jobs',
'seasonal_households', 'gq_persons', 'flu_space',
'k12', 'posths']:
make_broadcast_injectable('parcels', curr_tab, par_col, 'parcel_id')
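# Hedged sketch: `make_broadcast_injectable` comes from azsmart.framework and its
# implementation is not shown here; each generated column is assumed to behave
# roughly like a hand-written orca column that maps the child table's parcel_id
# onto the parcel attribute, e.g.:
#
#   @orca.column('buildings')
#   def county(buildings, parcels):
#       return buildings['parcel_id'].map(parcels['county'])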
###################
# buildings schema
###################
@orca.column('buildings', cache=True)
def is_rsf(buildings):
"""
Dummy for single family.
"""
return (buildings['building_type_name'] == 'rsf').astype(int)
@orca.column('buildings', cache=True)
def is_rmf(buildings):
"""
Dummy for multi family.
"""
return (buildings['building_type_name'] == 'rmf').astype(int)
@orca.column('buildings', cache=True)
def is_mf(buildings):
"""
Duplicate dummy for multi family. TODO: remove this
and update variables referencing it
"""
return buildings['is_rmf']
@orca.column('buildings', cache=True)
def is_mh(buildings):
"""
Dummy for mobile home.
"""
return (buildings['building_type_name'] == 'mh').astype(int)
@orca.column('buildings', cache=True)
def is_med(buildings):
"""
Dummy for medical buildings.
"""
return (buildings['building_type_name'] == 'med').astype(int)
@orca.column('buildings', cache=True)
def sb_med_sector_sampling_weights(buildings):
"""
For medical model sector sampling purpose in elcm.
"""
return (0.75*(buildings['building_type_name'] == 'med') + 0.17*(buildings['building_type_name'] == 'off') \
+ 0.05*(buildings['building_type_name'] == 'retl') + 0.03*(buildings['building_type_name'] == 'gq')).astype(float)
@orca.column('buildings', 'building_age', cache=True)
def building_age(buildings, year):
return year - buildings['year_built']
@orca.column('buildings', cache=True, cache_scope='forever')
def sqft_per_res_unit(buildings):
return fill_nulls(
buildings['residential_sqft'] / buildings['residential_units'], 0)
@orca.column('buildings', cache=True, cache_scope='forever')
def res_sqft_per_unit(buildings):
"""
TEMPORARY -- this is a duplication of `sqft_per_res_unit'
so just return it. TODO: eliminate dependencies to this.
"""
return buildings['sqft_per_res_unit']
@orca.column('buildings', cache=True, cache_scope='forever')
def land_area_per_unit(buildings):
"""
Land area per residential unit, only for residential buildings
"""
return fill_nulls(buildings['area'] / buildings['residential_units'])
@orca.column('buildings', cache=True, cache_scope='forever')
def value_per_res_unit(buildings):
return fill_nulls(
buildings['total_fcv'] / buildings['residential_units'], 0)
@orca.column('buildings', cache=True, cache_scope='forever')
def value_per_res_sqft(buildings):
return fill_nulls(
buildings['total_fcv'] / buildings['residential_sqft'], 0)
@orca.column('buildings', cache=True, cache_scope='forever')
def value_per_nonres_sqft(buildings):
return fill_nulls(
buildings['total_fcv'] / buildings['non_residential_sqft'], 0)
@orca.column('buildings', cache=True, cache_scope='forever')
def is_residential(buildings):
return buildings['building_type_name'].isin(['rsf', 'rmf', 'mh'])
@orca.column('buildings', cache=True, cache_scope='iteration')
def res_hh(buildings, households):
hh_sums = households.local.groupby('building_id').size()
return hh_sums.reindex(buildings.index).fillna(0)
@orca.column('buildings', cache=True, cache_scope='iteration')
def seas_hh(buildings, seasonal_households):
seas_sums = seasonal_households.local.groupby('building_id').size()
return seas_sums.reindex(buildings.index).fillna(0)
@orca.column('buildings', cache=True, cache_scope='iteration')
def total_hh(buildings):
return buildings['res_hh'] + buildings['seas_hh']
@orca.column('buildings', cache=True, cache_scope='iteration')
def vac_res_units(buildings):
return buildings['residential_units'] - buildings['total_hh']
@orca.column('buildings', cache=True, cache_scope='iteration')
def site_based_jobs(buildings, jobs):
sb = jobs.local.query("job_class == 'site based'")
return sb.groupby('building_id').size().reindex(buildings.index).fillna(0)
@orca.column('buildings', cache=True, cache_scope='iteration')
def job_spaces(buildings):
est_job_spaces = np.round(fill_nulls(
buildings['non_residential_sqft'] / buildings['sqft_per_job'], 0))
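# job_spaces is the element-wise maximum of the sqft-based estimate and the
# jobs already sited in the building, so capacity never drops below occupancy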
return pd.DataFrame([est_job_spaces, buildings['site_based_jobs']]).max()
@orca.column('buildings', cache=True, cache_scope='iteration')
def vac_job_spaces(buildings):
return buildings['job_spaces'] - buildings['site_based_jobs']
@orca.column('buildings', cache=True, cache_scope='iteration')
def total_sqft(buildings):
return buildings['residential_sqft'].fillna(0) + buildings['non_residential_sqft'].fillna(0)
@orca.column('buildings', cache=True, cache_scope='iteration')
def res_units_in_last5(buildings):
"""
# of residential units built in the last 5 years.
"""
is_new = (buildings['building_age'] <= 5).astype(int)
return buildings['residential_units'] * is_new
@orca.column('buildings', cache=True, cache_scope='iteration')
def sf_res_units_in_last5(buildings):
"""
# of single family residential units built in the last 5 years.
"""
is_sf = buildings['building_type_name'] == 'rsf'
is_new = (buildings['building_age'] <= 5)
return buildings['residential_units'] * (is_sf & is_new).astype(int)
@orca.column('buildings', cache=True, cache_scope='iteration')
def mf_res_units_in_last5(buildings):
"""
# of multi family residential units built in the last 5 years.
"""
is_mf = buildings['building_type_name'] == 'rmf'
is_new = (buildings['building_age'] <= 5)
return buildings['residential_units'] * (is_mf & is_new).astype(int)
@orca.column('buildings')
def sb_edu_jobs(buildings, jobs):
"""
Number of naics 61 (education) jobs in a building.
"""
sb_naics61_jobs = jobs.local.query("job_class == 'site based' and mag_naics == '61'")
return sb_naics61_jobs.groupby('building_id').size().reindex(buildings.index).fillna(0)
# broadcast building variables
bldg_broadcast_cols = ['building_type_name', 'residential_sqft', 'residential_units',
'total_fcv', 'year_built', 'parcel_id']
for bldg_col in bldg_broadcast_cols:
for curr_tab in ['households', 'persons', 'jobs', 'seasonal_households', 'gq_persons']:
make_broadcast_injectable('buildings', curr_tab, bldg_col, 'building_id')
#####################
# households schema
#####################
@orca.column('households', cache=True, cache_scope='iteration')
def income_quintile(households):
"""
Household income quintile. TODO: do we want to do this at the
county level?
"""
return pd.Series(
pd.qcut(
households['income'], 5, [1, 2, 3, 4, 5]),
index=households.index
)
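# For reference, pd.qcut bins on sample quantiles, so roughly one fifth of
# households fall in each labelled bin. Tiny illustrative example (made-up data):
#
#   pd.qcut(pd.Series([10, 20, 30, 40, 50]), 5, [1, 2, 3, 4, 5])
#   # -> labels 1, 2, 3, 4, 5 (one value per quintile)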
@orca.column('households', cache=True, cache_scope='iteration')
def income_quintile_MC(households):
"""
Household income quintile for MC households only.
"""
MC_hhlds = households.to_frame(['income', 'county']).query("county == 'MC'")
return
crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.port_in_connect.source, testentity.port_in, "The connection's source is the pulled up port")
self.assertEqual(testentity.port_in_connect.target, testentity.sub1.port_in, "The connection's target is the subentity's input port")
# check port pull up
self.assertTrue(isinstance(testentity.port_out, crest.Output), "The entity pulled up an output port")
self.assertEqual(testentity.port_out.value, testentity.sub2.port_out.value, "The pulled up port has the same value")
self.assertEqual(testentity.port_out.resource, testentity.sub2.port_out.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.port_out_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.port_out_connect.source, testentity.sub2.port_out, "The connection's source is the subentity output port")
self.assertEqual(testentity.port_out_connect.target, testentity.port_out, "The connection's target is the pulled up output port")
def test_named_pullup_multiple_ports(self):
class SubClass(self.testclass):
def __init__(self):
api.pullup(my_port_in=self.sub1.port_in, my_port_out=self.sub2.port_out)
testentity = SubClass()
# check port pull up
self.assertTrue(isinstance(testentity.my_port_in, crest.Input), "The entity pulled up port")
self.assertEqual(testentity.my_port_in.value, testentity.sub1.port_in.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_in.resource, testentity.sub1.port_in.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_in_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_in_connect.source, testentity.my_port_in, "The connection's source is the pulled up port")
self.assertEqual(testentity.my_port_in_connect.target, testentity.sub1.port_in, "The connection's target is the subentity's input port")
# check port pull up
self.assertTrue(isinstance(testentity.my_port_out, crest.Output), "The entity pulled up an output port")
self.assertEqual(testentity.my_port_out.value, testentity.sub2.port_out.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_out.resource, testentity.sub2.port_out.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_out_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_out_connect.source, testentity.sub2.port_out, "The connection's source is the subentity output port")
self.assertEqual(testentity.my_port_out_connect.target, testentity.my_port_out, "The connection's target is the pulled up output port")
def test_pullup_multiple_ports_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.pullup(self.sub1.port_in, self.sub2.port_in)
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception),"Error during pullup. There exists already an object called 'port_in'.")
def test_pullup_two_individual_pullups_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.pullup(self.sub1.port_in)
api.pullup(self.sub2.port_in)
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception),"Error during pullup. There exists already an object called 'port_in'.")
def test_pullup_influence_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
self.port_in_connect = crest.Influence(source=self.sub1.port_out, target=self.sub2.port_out)
api.pullup(self.sub1.port_in)
api.pullup(self.sub2.port_in)
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception), "Error during pullup. Cannot create connection influence. Name 'port_in_connect' already exists.")
def test_pullup_multiple_ports_avoid_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.pullup(my_port_in1=self.sub1.port_in,
my_port_in2=self.sub2.port_in)
testentity = SubClass()
# check port pull up
self.assertTrue(isinstance(testentity.my_port_in1, crest.Input), "The entity pulled up port")
self.assertEqual(testentity.my_port_in1.value, testentity.sub1.port_in.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_in1.resource, testentity.sub1.port_in.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_in1_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_in1_connect.source, testentity.my_port_in1, "The connection's source is the pulled up port")
self.assertEqual(testentity.my_port_in1_connect.target, testentity.sub1.port_in, "The connection's target is the subentity's input port")
# check port pull up
self.assertTrue(isinstance(testentity.my_port_in2, crest.Input), "The entity pulled up port")
self.assertEqual(testentity.my_port_in2.value, testentity.sub1.port_in.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_in2.resource, testentity.sub1.port_in.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_in2_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_in2_connect.source, testentity.my_port_in2, "The connection's source is the pulled up port")
self.assertEqual(testentity.my_port_in2_connect.target, testentity.sub2.port_in, "The connection's target is the subentity's input port")
def test_pullup_two_individual_pullups_avoid_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.pullup(my_port_in1=self.sub1.port_in)
api.pullup(my_port_in2=self.sub2.port_in)
testentity = SubClass()
# check port pull up
self.assertTrue(isinstance(testentity.my_port_in1, crest.Input), "The entity pulled up port")
self.assertEqual(testentity.my_port_in1.value, testentity.sub1.port_in.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_in1.resource, testentity.sub1.port_in.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_in1_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_in1_connect.source, testentity.my_port_in1, "The connection's source is the pulled up port")
self.assertEqual(testentity.my_port_in1_connect.target, testentity.sub1.port_in, "The connection's target is the subentity's input port")
# check port pull up
self.assertTrue(isinstance(testentity.my_port_in2, crest.Input), "The entity pulled up port")
self.assertEqual(testentity.my_port_in2.value, testentity.sub1.port_in.value, "The pulled up port has the same value")
self.assertEqual(testentity.my_port_in2.resource, testentity.sub1.port_in.resource, "The pulled up port has the same resource")
# check influence
self.assertTrue(isinstance(testentity.my_port_in2_connect, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_port_in2_connect.source, testentity.my_port_in2, "The connection's source is the pulled up port")
self.assertEqual(testentity.my_port_in2_connect.target, testentity.sub2.port_in, "The connection's target is the subentity's input port")
class ConvenienceAPI_RelayTest(unittest.TestCase):
"""
These tests checks the api's relay function.
"""
def setUp(self):
"""Create an entity with subentities from which we can pullup and relay"""
res = crest.Resource("test", crest.REAL)
class TestSubEntity(crest.Entity):
state = current = crest.State()
port_in = crest.Input(res, 111)
port_in2 = crest.Input(res, 222)
port_out = crest.Output(res, 11111)
port_out2 = crest.Output(res, 22222)
class TestEntity(crest.Entity):
state = current = crest.State()
local = crest.Local(res, 9999)
local2 = crest.Local(res, 8888)
sub1 = TestSubEntity()
sub2 = TestSubEntity()
self.testclass = TestEntity
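# As the assertions below spell out, api.relay((src, tgt)) is expected to create
# a crest.Influence on the parent entity wiring src to tgt, auto-named
# '<src>_TO_<tgt>' unless a keyword name is supplied (behaviour inferred from
# these tests rather than from the crest/api sources themselves).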
def test_single_relay(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( (self.sub1.port_out, self.sub2.port_in) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.sub1_port_out_TO_sub2_port_in, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.sub1_port_out_TO_sub2_port_in.source, testentity.sub1.port_out, "The connection's correctly set.")
self.assertEqual(testentity.sub1_port_out_TO_sub2_port_in.target, testentity.sub2.port_in, "The connection's target is correctly set.")
def test_single_named_relay(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( my_relay=(self.sub1.port_out, self.sub2.port_in) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.my_relay, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_relay.source, testentity.sub1.port_out, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay.target, testentity.sub2.port_in, "The connection's target is correctly set.")
def test_multiple_relay(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( (self.sub1.port_out, self.sub2.port_in),
(self.sub1.port_out2, self.sub2.port_in2) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.sub1_port_out_TO_sub2_port_in, crest.Influence), "The entity created an influence for the relayed port")
self.assertEqual(testentity.sub1_port_out_TO_sub2_port_in.source, testentity.sub1.port_out, "The connection's source is correctly set.")
self.assertEqual(testentity.sub1_port_out_TO_sub2_port_in.target, testentity.sub2.port_in, "The connection's target is correctly set.")
# check influence
self.assertTrue(isinstance(testentity.sub1_port_out2_TO_sub2_port_in2, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.sub1_port_out2_TO_sub2_port_in2.source, testentity.sub1.port_out2, "The connection's source is correctly set.")
self.assertEqual(testentity.sub1_port_out2_TO_sub2_port_in2.target, testentity.sub2.port_in2, "The connection's target correctly set.")
def test_multiple_named_relay(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( my_relay=(self.sub1.port_out, self.sub2.port_in),
my_relay2=(self.sub1.port_out2, self.sub2.port_in2) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.my_relay, crest.Influence), "The entity created an influence for the relayed port")
self.assertEqual(testentity.my_relay.source, testentity.sub1.port_out, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay.target, testentity.sub2.port_in, "The connection's target is correctly set.")
# check influence
self.assertTrue(isinstance(testentity.my_relay2, crest.Influence), "The entity created an influence for the pulled up port")
self.assertEqual(testentity.my_relay2.source, testentity.sub1.port_out2, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay2.target, testentity.sub2.port_in2, "The connection's target correctly set.")
def test_relay_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( (self.sub1.port_out, self.sub2.port_in),
(self.sub1.port_out, self.sub2.port_in))
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception), "Error during relay. Cannot create influence. Name 'sub1_port_out_TO_sub2_port_in' already exists.")
def test_named_relay_name_clash(self):
class SubClass(self.testclass):
def __init__(self):
api.relay(my_relay=(self.sub1.port_out, self.sub2.port_in))
api.relay(my_relay=(self.sub1.port_out2, self.sub2.port_in2))
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception), "Error during relay. Cannot create influence. Name 'my_relay' already exists.")
def test_source_is_not_port(self):
class SubClass(self.testclass):
def __init__(self):
api.relay(my_relay=(self.state, self.sub2.port_in))
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception), "Source object 'state' is not a Port.")
def test_target_is_not_port(self):
class SubClass(self.testclass):
def __init__(self):
api.relay(my_relay=(self.sub1.port_out, self.state))
with self.assertRaises(ValueError) as context:
SubClass()
self.assertEqual(str(context.exception), "Target object 'state' is not a Port.")
def test_both_ports_in_entity(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( my_relay=(self.local, self.local2) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.my_relay, crest.Influence), "The entity created an influence for the relayed port")
self.assertEqual(testentity.my_relay.source, testentity.local, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay.target, testentity.local2, "The connection's target is correctly set.")
def test_source_port_in_subentity(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( my_relay=(self.sub1.port_out, self.local2) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.my_relay, crest.Influence), "The entity created an influence for the relayed port")
self.assertEqual(testentity.my_relay.source, testentity.sub1.port_out, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay.target, testentity.local2, "The connection's target is correctly set.")
def test_target_port_in_subentity(self):
class SubClass(self.testclass):
def __init__(self):
api.relay( my_relay=(self.local, self.sub1.port_in) )
testentity = SubClass()
# check influence
self.assertTrue(isinstance(testentity.my_relay, crest.Influence), "The entity created an influence for the relayed port")
self.assertEqual(testentity.my_relay.source, testentity.local, "The connection's source is correctly set.")
self.assertEqual(testentity.my_relay.target, testentity.sub1.port_in, "The connection's target is correctly set.")
def test_source_parent_is_None_throws_error(self):
class SubClass(self.testclass):
def __init__(self):
pass
local = crest.Local(crest.Resource("dummy", crest.REAL), 12345)
testentity = SubClass()
with self.assertRaises(ValueError) as context:
api.relay( my_relay=(local, testentity.sub1.port_in) )
self.assertEqual(str(context.exception), "Either the source or the target port are not inside an entity")
def test_target_parent_is_None_throws_error(self):
class SubClass(self.testclass):
def __init__(self):
pass
local = crest.Local(crest.Resource("dummy", crest.REAL), 12345)
testentity = SubClass()
with self.assertRaises(ValueError) as context:
api.relay( my_relay=(testentity.sub1.port_in, local) )
self.assertEqual(str(context.exception), "Either the source or the target port are not | |
import gpflow
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.utilities import ops, print_summary
from gpflow.config import set_default_float, default_float, set_default_summary_fmt
from gpflow.ci_utils import ci_niter
from typing import Optional
from gpflow import covariances, kernels, likelihoods
from gpflow.base import Parameter
from gpflow.config import default_float, default_jitter
from gpflow.expectations import expectation
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import Kernel, MultioutputKernel
from gpflow.likelihoods import SwitchedLikelihood
from gpflow.mean_functions import MeanFunction, Zero
from gpflow.probability_distributions import DiagonalGaussian
from gpflow.utilities import positive, to_default_float
from gpflow.utilities.ops import pca_reduce
from gpflow.models.model import BayesianModel
from gpflow.models.training_mixins import InputData, InternalDataTrainingLossMixin, OutputData
from gpflow.models.util import data_input_to_tensor, inducingpoint_wrapper
#import cop-e-cat dfs
Y = data_df[['ALBUMIN', 'ANION GAP',
'BASE EXCESS', 'BICARBONATE', 'BILIRUBIN', 'CALCIUM',
'CARBOXYHEMOGLOBIN', 'CHLORIDE', 'CREATININE', 'HEMATOCRIT',
'HEMOGLOBIN', 'INSPIRED OXYGEN', 'INTERNATIONAL NORMALIZED RATIO',
'LACTATE', 'METHEMOGLOBIN', 'PARTIAL THROMBOPLASTIN TIME', 'PCO2', 'PH',
'PLATELETS', 'PO2', 'POTASSIUM', 'SODIUM', 'UREA NITROGEN',
'URINE OUTPUT', 'WHITE BLOOD CELLS', 'FIO2', 'PEEP', 'OXYGEN (L)',
'Respiratory Aids', 'Nonsteroidal Anti-inflammatory Agents (NSAIDs)',
'Corticosteroids - Topical', 'Mineralocorticoids',
'Glucocorticosteroids', 'Influenza Agents', 'Antiretrovirals',]]
# Y = Y[:300]
for col in Y.columns:
    # use .loc so the assignment modifies Y itself rather than a temporary copy
    Y.loc[(Y[col] < 0) | (Y[col] > 1000), col] = Y[(Y[col] > 0) & (Y[col] < 1000)][col].mean()
std = Y.std()
std[std == 0] = 1  # avoid dividing by zero for constant columns
Y = (Y - Y.mean()) / std
class tGPLVM(BayesianModel, InternalDataTrainingLossMixin):
def __init__(
self,
data: OutputData,
X_data_mean: tf.Tensor,
X_data_var: tf.Tensor,
kernel: Kernel,
mean_function=None,
num_inducing_variables: Optional[int] = None,
inducing_variable=None,
X_prior_mean=None,
X_prior_var=None,
degrees_of_freedom=3,
):
"""
Initialise tGPLVM object. This method only works with a Gaussian likelihood.
:param data: data matrix, size N (number of points) x D (dimensions)
:param X_data_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
:param X_data_var: variance of latent positions ([N, Q]), for the initialisation of the latent space.
:param kernel: kernel specification, by default Squared Exponential
:param num_inducing_variables: number of inducing points, M
:param inducing_variable: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
random permutation of X_data_mean.
:param X_prior_mean: prior mean used in KL term of bound. By default 0. Same size as X_data_mean.
:param X_prior_var: prior variance used in KL term of bound. By default 1.
"""
super().__init__()
num_data, self.num_latent_gps = X_data_mean.shape
if mean_function is None:
mean_function = Zero()
self.mean_function = mean_function
self.kernel = kernel
self.likelihood = likelihoods.Gaussian()
self.data = data_input_to_tensor(data)
assert X_data_var.ndim == 2
self.X_data_mean = Parameter(X_data_mean)
self.X_data_var = Parameter(X_data_var, transform=positive())
self.num_data = num_data
self.output_dim = self.data.shape[-1]
self.dof = degrees_of_freedom
assert np.all(X_data_mean.shape == X_data_var.shape)
assert X_data_mean.shape[0] == self.data.shape[0], "X mean and Y must be same size."
assert X_data_var.shape[0] == self.data.shape[0], "X var and Y must be same size."
if (inducing_variable is None) == (num_inducing_variables is None):
raise ValueError(
"BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`"
)
if inducing_variable is None:
# default: initialise the inducing inputs from a random subset of the latent means
Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
inducing_variable = InducingPoints(Z)
self.inducing_variable = inducingpoint_wrapper(inducing_variable)
assert X_data_mean.shape[1] == self.num_latent_gps
# deal with parameters for the prior mean variance of X
if X_prior_mean is None:
X_prior_mean = tf.zeros((self.num_data, self.num_latent_gps), dtype=default_float())
if X_prior_var is None:
X_prior_var = tf.ones((self.num_data, self.num_latent_gps))
self.X_prior_mean = tf.convert_to_tensor(np.atleast_1d(X_prior_mean), dtype=default_float())
self.X_prior_var = tf.convert_to_tensor(np.atleast_1d(X_prior_var), dtype=default_float())
assert self.X_prior_mean.shape[0] == self.num_data
assert self.X_prior_mean.shape[1] == self.num_latent_gps
assert self.X_prior_var.shape[0] == self.num_data
assert self.X_prior_var.shape[1] == self.num_latent_gps
def maximum_log_likelihood_objective(self) -> tf.Tensor:
return self.elbo()
def elbo(self) -> tf.Tensor:
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
Y_data = self.data
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
num_inducing = self.inducing_variable.num_inducing
psi0 = tf.reduce_sum(expectation(pX, self.kernel))
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX, (self.kernel, self.inducing_variable), (self.kernel, self.inducing_variable)
),
axis=0,
)
cov_uu = covariances.Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
L = tf.linalg.cholesky(cov_uu)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
# Compute intermediate matrices
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
log_det_B = 2.0 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LB)))
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
# KL[q(x) || p(x)]
dX_data_var = (
self.X_data_var
if self.X_data_var.shape.ndims == 2
else tf.linalg.diag_part(self.X_data_var)
)
NQ = to_default_float(tf.size(self.X_data_mean))
D = to_default_float(tf.shape(Y_data)[1])
KL = -0.5 * tf.reduce_sum(tf.math.log(dX_data_var))
KL += 0.5 * tf.reduce_sum(tf.math.log(self.X_prior_var))
KL -= 0.5 * NQ
KL += 0.5 * tf.reduce_sum(
(tf.square(self.X_data_mean - self.X_prior_mean) + dX_data_var) / self.X_prior_var
)
# compute log marginal bound
ND = to_default_float(tf.size(Y_data))
bound = -0.5 * ND * tf.math.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(Y_data)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 - tf.reduce_sum(tf.linalg.diag_part(AAT)))
bound -= KL
return bound
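# For reference, the quantity assembled above is the standard collapsed
# variational lower bound used for the Bayesian GPLVM (cf. Titsias & Lawrence):
#
#   F = -(N*D/2) log(2*pi*sigma^2) - (D/2) log|B| - ||Y||^2 / (2*sigma^2)
#       + ||c||^2 / 2 - (D/2) (psi0 / sigma^2 - tr(AAT)) - KL[q(X) || p(X)]
#
# where A = L^-1 Psi1^T / sigma, AAT = L^-1 Psi2 L^-T / sigma^2, B = I + AAT
# and c = LB^-1 A Y / sigma, matching the intermediate matrices computed above.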
def calc_num_latent_gps_from_data(self, data, kernel, likelihood):
"""
Calculates the number of latent GPs required based on the data as well
as the type of kernel and likelihood.
"""
_, Y = data
output_dim = Y.shape[-1]
if isinstance(kernel, MultioutputKernel):
# MultioutputKernels already have num_latent_gps attributes
num_latent_gps = kernel.num_latent_gps
elif isinstance(likelihood, SwitchedLikelihood):
# the SwitchedLikelihood partitions/stitches based on the last
# column in Y, but we should not add a separate latent GP for this!
# hence decrement by 1
num_latent_gps = output_dim - 1
assert num_latent_gps > 0
else:
num_latent_gps = output_dim
return num_latent_gps
def predict_f(self, Xnew, full_cov=False, full_output_cov=False):
"""
:param Xnew: points at which to predict
"""
if full_output_cov:
raise NotImplementedError
pX = DiagonalGaussian(self.X_data_mean, self.X_data_var)
Y_data = self.data
num_inducing = self.inducing_variable.num_inducing
psi1 = expectation(pX, (self.kernel, self.inducing_variable))
psi2 = tf.reduce_sum(
expectation(
pX, (self.kernel, self.inducing_variable), (self.kernel, self.inducing_variable)
),
axis=0,
)
jitter = default_jitter()
Kus = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
L = tf.linalg.cholesky(covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter))
A = tf.linalg.triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.linalg.triangular_solve(L, psi2, lower=True)
AAT = tf.linalg.triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=default_float())
LB = tf.linalg.cholesky(B)
c = tf.linalg.triangular_solve(LB, tf.linalg.matmul(A, Y_data), lower=True) / sigma
tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = (
self.kernel(Xnew)
+ tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
- tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
)
shape = tf.stack([1, 1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = (
self.kernel(Xnew, full_cov=False)
+ tf.reduce_sum(tf.square(tmp2), axis=0)
- tf.reduce_sum(tf.square(tmp1), axis=0)
)
shape = tf.stack([1, tf.shape(Y_data)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
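# For reference, with tmp1 = L^-1 Kus and tmp2 = LB^-1 tmp1 the code above
# evaluates the usual collapsed sparse-GP posterior at Xnew:
#
#   mean = Kus^T L^-T LB^-T c  (+ mean function)
#   var  = Kss - tmp1^T tmp1 + tmp2^T tmp2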
def predict_f_samples(
self,
Xnew: InputData,
num_samples: Optional[int] = None,
full_cov: bool = True,
full_output_cov: bool = False,
) -> tf.Tensor:
"""
Produce samples from the posterior latent function(s) at the input points.
:param Xnew: InputData
Input locations at which to draw samples, shape [..., N, D]
where N is the number of rows and D is the input dimension of each point.
:param num_samples:
Number of samples to draw.
If `None`, a single sample is drawn and the return shape is [..., N, P],
for any positive integer the return shape contains an extra batch
dimension, [..., S, N, P], with S = num_samples and P is the number of outputs.
:param full_cov:
If True, draw correlated samples over the inputs. Computes the Cholesky over the
dense covariance matrix of size [num_data, num_data].
If False, draw samples that are uncorrelated over the inputs.
:param full_output_cov:
If True, draw correlated samples over the outputs.
If False, draw samples that are uncorrelated over the outputs.
Currently, the method does not support `full_output_cov=True` and `full_cov=True`.
"""
mean, cov = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
y = tfp.distributions.StudentT(self.dof, mean, cov, name='StudentT')
return y.sample(sample_shape=() if num_samples is None else (num_samples,))
def predict_y(self, Xnew, full_cov=False, full_output_cov=False):
"""
Compute the mean and variance of the held-out data at the input points.
"""
f_mean, f_var = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
return self.likelihood.predict_mean_and_var(f_mean, f_var)
def predict_log_density(self, data, full_cov=False, full_output_cov=False):
"""
Compute the log density of the data at the new data points.
"""
X, Y = data
f_mean, f_var = self.predict_f(X, full_cov=full_cov, full_output_cov=full_output_cov)
return self.likelihood.predict_log_density(f_mean, f_var, Y)
from sklearn.decomposition import PCA
latent_dim = 3 # number of latent dimensions
num_inducing = 20 # number of inducing pts
num_data = Y.shape[0] # number of data points
pca = PCA(n_components = latent_dim)
X_mean_init = tf.convert_to_tensor(pca.fit_transform(Y))
X_var_init = tf.ones((num_data, latent_dim), dtype=default_float())
np.random.seed(1) # for reproducibility
batchsize = 30
opt = gpflow.optimizers.Scipy()
maxiter = ci_niter(100)
params = None
for i in range(0, Y.shape[0] - batchsize, batchsize):
print(i)
inducing_variable = tf.convert_to_tensor(
np.random.permutation(X_mean_init[i:i+batchsize].numpy())[:num_inducing], dtype=default_float()
)
lengthscales =
modulus 7^20
sage: Zp(7, 20, 'capped-abs')
7-adic Ring with capped absolute precision 20
We create a capped relative ring with each print mode::
sage: k = Zp(7, 8, print_mode='series'); k
7-adic Ring with capped relative precision 8
sage: k(7*(19))
5*7 + 2*7^2 + O(7^9)
sage: k(7*(-19))
2*7 + 4*7^2 + 6*7^3 + 6*7^4 + 6*7^5 + 6*7^6 + 6*7^7 + 6*7^8 + O(7^9)
::
sage: k = Zp(7, print_mode='val-unit'); k
7-adic Ring with capped relative precision 20
sage: k(7*(19))
7 * 19 + O(7^21)
sage: k(7*(-19))
7 * 79792266297611982 + O(7^21)
::
sage: k = Zp(7, print_mode='terse'); k
7-adic Ring with capped relative precision 20
sage: k(7*(19))
133 + O(7^21)
sage: k(7*(-19))
558545864083283874 + O(7^21)
Note that `p`-adic rings are cached (via weak references)::
sage: a = Zp(7); b = Zp(7)
sage: a is b
True
We create some elements in various rings::
sage: R = Zp(5); a = R(4); a
4 + O(5^20)
sage: S = Zp(5, 10, type = 'capped-abs'); b = S(2); b
2 + O(5^10)
sage: a + b
1 + 5 + O(5^10)
"""
def create_key(self, p, prec = None, type = 'capped-rel', print_mode = None,
names = None, ram_name = None, print_pos = None, print_sep = None, print_alphabet = None,
print_max_terms = None, show_prec = None, check = True,
label = None):
r"""
Creates a key from input parameters for ``Zp``.
See the documentation for ``Zp`` for more information.
TESTS::
sage: Zp.create_key(5,40)
(5, 40, 'capped-rel', 'series', '5', True, '|', (), -1, 'bigoh', None)
sage: Zp.create_key(5,40,print_mode='digits')
(5,
40,
'capped-rel',
'digits',
'5',
True,
'|',
('0', '1', '2', '3', '4'),
-1,
'dots',
None)
"""
if isinstance(names, (int, Integer)):
# old pickle; names is what used to be halt.
names = ram_name
ram_name = print_pos
print_pos = print_sep
print_alphabet = print_max_terms
print_max_terms = check
check = True
if label is not None and type not in ['lattice-cap','lattice-float']:
raise ValueError("label keyword only supported for lattice precision")
return get_key_base(p, prec, type, print_mode, names, ram_name, print_pos, print_sep, print_alphabet,
print_max_terms, show_prec, check,
['capped-rel', 'fixed-mod', 'capped-abs', 'floating-point', 'lattice-cap', 'lattice-float'],
label=label)
def create_object(self, version, key):
r"""
Creates an object using a given key.
See the documentation for ``Zp`` for more information.
TESTS::
sage: Zp.create_object((3,4,2),(5, 41, 'capped-rel', 'series', '5', True, '|', (), -1))
5-adic Ring with capped relative precision 41
"""
if (version[0] < 3 or (len(version) > 1 and version[0] == 3 and version[1] < 2) or
(len(version) > 2 and version[0] == 3 and version[1] == 2 and version[2] < 3)):
p, prec, type, print_mode, name = key
print_pos, print_sep, print_alphabet, print_max_terms = None, None, None, None
elif version[0] < 8:
p, prec, type, print_mode, name, print_pos, print_sep, print_alphabet, print_max_terms = key
show_prec = None
label = None
else:
p, prec, type, print_mode, name, print_pos, print_sep, print_alphabet, print_max_terms, show_prec, label = key
if (version[0] < 4 or (len(version) > 1 and version[0] == 4 and version[1] < 5) or
(len(version) > 2 and version[0] == 4 and version[1] == 5 and version[2] < 3)):
# keys changed in order to reduce irrelevant duplications: e.g. two Zps with print_mode 'series'
# that are identical except for different 'print_alphabet' now return the same object.
key = get_key_base(p, prec, type, print_mode, name, None, print_pos, print_sep, print_alphabet,
print_max_terms, None, False, ['capped-rel', 'fixed-mod', 'capped-abs', 'lattice-cap', 'lattice-float'])
try:
obj = self._cache[version, key]()
if obj is not None:
return obj
except KeyError:
pass
p, prec, type, print_mode, name, print_pos, print_sep, print_alphabet, print_max_terms, show_prec, label = key
if type == 'capped-rel':
return pAdicRingCappedRelative(p, prec, {'mode': print_mode, 'pos': print_pos, 'sep': print_sep, 'alphabet': print_alphabet,
'ram_name': name, 'max_ram_terms': print_max_terms, 'show_prec': show_prec}, name)
elif type == 'fixed-mod':
return pAdicRingFixedMod(p, prec, {'mode': print_mode, 'pos': print_pos, 'sep': print_sep, 'alphabet': print_alphabet,
'ram_name': name, 'max_ram_terms': print_max_terms, 'show_prec': show_prec}, name)
elif type == 'capped-abs':
return pAdicRingCappedAbsolute(p, prec, {'mode': print_mode, 'pos': print_pos, 'sep': print_sep, 'alphabet': print_alphabet,
'ram_name': name, 'max_ram_terms': print_max_terms, 'show_prec': show_prec}, name)
elif type == 'floating-point':
return pAdicRingFloatingPoint(p, prec, {'mode': print_mode, 'pos': print_pos, 'sep': print_sep, 'alphabet': print_alphabet,
'ram_name': name, 'max_ram_terms': print_max_terms, 'show_prec': show_prec}, name)
elif type[:8] == 'lattice-':
subtype = type[8:]
return pAdicRingLattice(p, prec, subtype, {'mode': print_mode, 'pos': print_pos, 'sep': print_sep, 'alphabet': print_alphabet,
'ram_name': name, 'max_ram_terms': print_max_terms, 'show_prec': show_prec}, name, label)
else:
raise ValueError("unexpected type")
Zp = Zp_class("Zp")
######################################################
# Zq -- unramified extensions
######################################################
def Zq(q, prec = None, type = 'capped-rel', modulus = None, names=None,
print_mode=None, ram_name = None, res_name = None, print_pos = None,
print_sep = None, print_max_ram_terms = None,
print_max_unram_terms = None, print_max_terse_terms = None, show_prec = None, check = True, implementation = 'FLINT'):
r"""
Given a prime power `q = p^n`, return the unique unramified
extension of `\mathbb{Z}_p` of degree `n`.
INPUT:
- ``q`` -- integer, list or tuple: the prime power in `\mathbb{Q}_q`. Or a
factorization object, single element list ``[(p, n)]`` where ``p`` is
a prime and ``n`` a positive integer, or the pair ``(p, n)``.
- ``prec`` -- integer (default: ``20``) the precision cap of the
field. Individual elements keep track of their own precision.
See TYPES and PRECISION below.
- ``type`` -- string (default: ``'capped-rel'``) Valid types are
``'capped-abs'``, ``'capped-rel'``, ``'fixed-mod'``, and
``'floating-point'``. See TYPES and PRECISION below
- ``modulus`` -- polynomial (default ``None``). A polynomial defining an
unramified extension of `\mathbb{Z}_p`. See MODULUS below.
- ``names`` -- string or tuple (``None`` is only allowed when
`q=p`). The name of the generator, reducing to a generator of
the residue field.
- ``print_mode`` -- string (default: ``None``). Valid modes are ``'series'``,
``'val-unit'``, ``'terse'``, and ``'bars'``. See PRINTING below.
- ``ram_name`` -- string (defaults to string representation of `p` if
None). ``ram_name`` controls how the prime is printed. See PRINTING
below.
- ``res_name`` -- string (defaults to ``None``, which corresponds
to adding a ``'0'`` to the end of the name). Controls how
elements of the residue field print.
- ``print_pos`` -- bool (default ``None``) Whether to only use
positive integers in the representations of elements. See
PRINTING below.
- ``print_sep`` -- string (default ``None``) The separator
character used in the ``'bars'`` mode. See PRINTING below.
- ``print_max_ram_terms`` -- integer (default ``None``) The maximum
number of powers of `p` shown. See PRINTING below.
- ``print_max_unram_terms`` -- integer (default ``None``) The
maximum number of entries shown in a coefficient of `p`. See
PRINTING below.
- ``print_max_terse_terms`` -- integer (default ``None``) The maximum
number of terms in the polynomial representation of an element
(using ``'terse'``). See PRINTING below.
- ``show_prec`` -- bool (default ``None``) Whether to show the precision
for elements. See PRINTING below.
- ``check`` -- bool (default ``True``) whether to check inputs.
- ``implementation`` -- string (default ``FLINT``) which
implementation to use. ``NTL`` is the other option.
OUTPUT:
- The corresponding unramified `p`-adic ring.
TYPES AND PRECISION:
There are two types of precision for a `p`-adic element. The first
is relative precision (default), which gives the number of known `p`-adic
digits::
sage: R.<a> = Zq(25, 20, 'capped-rel', print_mode='series'); b = 25*a; b
a*5^2 + O(5^22)
sage: b.precision_relative()
20
The second type of precision is absolute precision, which gives
the power of `p` that this element is defined modulo::
sage: b.precision_absolute()
22
There are many types of `p`-adic rings: capped relative rings
(``type='capped-rel'``), capped absolute rings
(``type='capped-abs'``), fixed modulus rings (``type='fixed-mod'``),
and floating point rings (``type='floating-point'``).
In the capped relative case, the relative precision of an element
is restricted to be at most a certain value, specified at the
creation of the field. Individual elements also store their own
precision, so the effect of various arithmetic operations on
precision is tracked. When you cast an exact element into a
capped relative field, it truncates it to the precision cap of the
field.::
sage: R.<a> = Zq(9, 5, 'capped-rel', print_mode='series'); b = (1+2*a)^4; b
2 + (2*a + 2)*3 + (2*a + 1)*3^2 + O(3^5)
sage: c = R(3249); c
3^2 + 3^4 + 3^5 + 3^6 + O(3^7)
sage: b
= None
if ctx.aliasExpr:
kwargs['alias'] = self.visit(ctx.aliasExpr)
name = self.visit(ctx.name)
workarea = self.visit(ctx.workArea)
if isinstance(workarea, float):
workarea = int(workarea)
return make_func_code('DB.use', name, workarea, opentype, **kwargs)
def visitLocate(self, ctx):
kwargs = OrderedDict()
scope, for_cond, while_cond, nooptimize = self.getQueryConditions(ctx.queryCondition())
if for_cond:
kwargs['for_cond'] = for_cond
if while_cond:
kwargs['while_cond'] = while_cond
scope = scope or ('rest',)
else:
scope = scope or ('all',)
if nooptimize:
kwargs['nooptimize'] = True
return make_func_code('DB.locate', **kwargs)
def visitContinueLocate(self, ctx):
return make_func_code('DB.continue_locate')
def visitAppendFrom(self, ctx):
if ctx.ARRAY():
return make_func_code('DB.insert', None, self.visit(ctx.expr()))
sourcename = self.visit(ctx.specialExpr(0))
kwargs = {}
if ctx.FOR():
kwargs['for_cond'] = add_args_to_code('lambda: {}', [self.visit(ctx.expr())])
if ctx.typeExpr:
kwargs['filetype'] = self.visit(ctx.typeExpr)
return make_func_code('DB.append_from', None, sourcename, **kwargs)
def visitAppend(self, ctx):
menupopup = not ctx.BLANK()
tablename = self.visit(ctx.specialExpr())
return make_func_code('DB.append', tablename, menupopup)
def visitInsert(self, ctx):
table = self.visit(ctx.specialExpr())
if ctx.ARRAY() or ctx.NAME() or ctx.MEMVAR():
values = self.visit(ctx.expr())
else:
values = self.visit(ctx.args())
fields = self.visit(ctx.specialArgs())
if fields:
if len(fields) != len(values):
raise Exception('number of fields must match number of values')
values = {field: value for field, value in zip(fields, values)}
else:
values = tuple(values)
return make_func_code('DB.insert', table, values)
def visitReplace(self, ctx):
value = self.visit(ctx.expr(0))
field = self.visit_with_disabled_scope(ctx.specialExpr(0))
scope, for_cond, while_cond, nooptimize = self.getQueryConditions(ctx.queryCondition())
scope = scope or ('next', 1)
if string_type(field):
field = field.lower().rsplit('.', 1)
tablename = field[0] if len(field) == 2 else None
field = field[-1]
else:
tablename = None
return make_func_code('DB.replace', tablename, scope, field, value)
def visitSkipRecord(self, ctx):
table = self.visit(ctx.specialExpr())
skipnum = self.visit(ctx.expr()) or 1
return make_func_code('DB.skip', table, skipnum)
def visitCopyTo(self, ctx):
copyTo = self.visit(ctx.specialExpr())
if ctx.STRUCTURE():
return make_func_code('DB.copy_structure', copyTo)
def visitDeleteRecord(self, ctx):
kwargs = OrderedDict()
scope, for_cond, while_cond, nooptimize = self.getQueryConditions(ctx.queryCondition())
scope = scope or ('next', 1)
name = self.visit(ctx.inExpr)
if for_cond:
kwargs['for_cond'] = for_cond
if while_cond:
kwargs['while_cond'] = while_cond
if ctx.RECALL():
kwargs['recall'] = True
return make_func_code('DB.delete_record', name, scope, **kwargs)
def visitPack(self, ctx):
if ctx.DATABASE():
return make_func_code('DB.pack_database')
elif ctx.DBF():
pack = 'dbf'
elif ctx.MEMO():
pack = 'memo'
else:
pack = 'both'
tablename = self.visit(ctx.tableName)
workarea = self.visit(ctx.workArea)
return make_func_code('DB.pack', pack, tablename, workarea)
def visitIndexOn(self, ctx):
field = self.visit(ctx.specialExpr()[0])
indexname = self.visit(ctx.specialExpr()[1])
tag_flag = not not ctx.TAG()
compact_flag = not not ctx.COMPACT()
if ctx.ASCENDING() and ctx.DESCENDING():
raise Exception('Invalid statement: {}'.format(self.getCtxText(ctx)))
order = 'descending' if ctx.DESCENDING() else 'ascending'
unique_flag = not not ctx.UNIQUE()
return make_func_code('DB.index_on', field, indexname, order, tag_flag, compact_flag, unique_flag)
def visitCount(self, ctx):
kwargs = OrderedDict()
scope, for_cond, while_cond, nooptimize = self.getQueryConditions(ctx.queryCondition())
if for_cond:
kwargs['for_cond'] = for_cond
if while_cond:
kwargs['while_cond'] = while_cond
scope = scope or ('rest',)
else:
scope = scope or ('all',)
if nooptimize:
kwargs['nooptimize'] = True
return add_args_to_code('{} = {}', (self.visit(ctx.toExpr), make_func_code('DB.count', None, scope, **kwargs)))
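# Illustrative translation of a COUNT command (hypothetical VFP input; the exact
# rendering depends on make_func_code/add_args_to_code, defined elsewhere):
#
#   COUNT TO nBig FOR price > 100
#     -> nBig = DB.count(None, ('all',), for_cond=lambda: price > 100)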
def visitSum(self, ctx):
kwargs = OrderedDict()
scope, for_cond, while_cond, nooptimize = self.getQueryConditions(ctx.queryCondition())
if for_cond:
kwargs['for_cond'] = for_cond
if while_cond:
kwargs['while_cond'] = while_cond
scope = scope or ('rest',)
else:
scope = scope or ('all',)
if nooptimize:
kwargs['nooptimize'] = True
sumexpr = add_args_to_code('lambda: {}', [self.visit(ctx.sumExpr)])
return add_args_to_code('{} = {}', (self.visit(ctx.toExpr), make_func_code('DB.sum', None, scope, sumexpr, **kwargs)))
def getQueryConditions(self, conditions):
scope, for_cond, while_cond, nooptimize = None, None, None, None
condition_types = [(condition.FOR() or condition.WHILE() or condition.NOOPTIMIZE() or type(condition.scopeClause())) for condition in conditions]
condition_types = [condition_type or condition_type.symbol.type for condition_type in condition_types]
if len(set(condition_types)) < len(condition_types):
raise Exception('Bad Query Condition')
for condition in conditions:
if condition.FOR():
for_cond = add_args_to_code('lambda: {}', [self.visit(condition.expr())])
if condition.WHILE():
while_cond = add_args_to_code('lambda: {}', [self.visit(condition.expr())])
if condition.scopeClause():
scope = self.visit(condition.scopeClause())
if condition.NOOPTIMIZE():
nooptimize = True
return scope, for_cond, while_cond, nooptimize
def visitReindex(self, ctx):
return make_func_code('DB.reindex', not not ctx.COMPACT())
def visitUpdateCmd(self, ctx):
table = self.visit(ctx.tableExpr)
set_fields = [(str(self.visit_with_disabled_scope(i)), self.visit(e)) for i, e in zip(ctx.identifier(), ctx.expr())]
kwargs = {}
if ctx.whereExpr:
kwargs['where'] = add_args_to_code('lambda: {}', [self.visit(ctx.whereExpr)])
if ctx.joinArgs:
kwargs['join'] = self.visit(ctx.joinArgs)
if ctx.fromArgs:
kwargs['from_args'] = self.visit(ctx.fromArgs)
return make_func_code('DB.update', table, set_fields, **kwargs)
def visitSeekRecord(self, ctx):
tablename = self.visit(ctx.tablenameExpr)
seek_expr = self.visit(ctx.seekExpr)
kwargs = OrderedDict()
if ctx.orderExpr or ctx.tagName:
kwargs['key_index'] = self.visit(ctx.orderExpr or ctx.tagName)
if ctx.cdxFileExpr or ctx.idxFileExpr:
kwargs['key_index_file'] = self.visit(ctx.cdxFileExpr or ctx.idxFileExpr)
if ctx.DESCENDING():
kwargs['descending'] = True
return make_func_code('DB.seek', tablename, seek_expr, **kwargs)
def visitZapTable(self, ctx):
return make_func_code('DB.zap', self.visit(ctx.specialExpr()))
def visitBrowse(self, ctx):
return make_func_code('DB.browse')
def visitScatterExpr(self, ctx):
kwargs = {}
if ctx.FIELDS():
fields = self.visit(ctx.args(0))
if ctx.LIKE():
kwargs['type'] = 'like'
elif ctx.EXCEPT():
kwargs['type'] = 'except'
kwargs['fields'] = fields
if ctx.MEMO():
kwargs['memo'] = True
if ctx.BLANK():
kwargs['blank'] = True
if ctx.NAME():
name = self.visit(ctx.expr(0))
if ctx.ADDITIVE():
kwargs['additive'] = name
kwargs['totype'] = 'name'
elif ctx.TO():
name = self.visit(ctx.expr(0))
kwargs['totype'] = 'array'
func = make_func_code('vfpfunc.scatter', **kwargs)
if not ctx.MEMVAR():
return add_args_to_code('{} = {}', (name, func))
return func
def visitGatherExpr(self, ctx):
kwargs = {}
if ctx.FIELDS():
kwargs['fields'] = self.visit(ctx.args(0))
if ctx.LIKE():
kwargs['type'] = 'like'
elif ctx.EXCEPT():
kwargs['type'] = 'except'
if ctx.MEMO():
kwargs['memo'] = True
if ctx.NAME() or ctx.FROM():
kwargs['val'] = self.visit(ctx.expr(0))
return make_func_code('vfpfunc.gather', **kwargs)
def visitScopeClause(self, ctx):
if ctx.ALL():
return 'all',
elif ctx.NEXT():
return 'next', self.visit(ctx.expr())
elif ctx.RECORD():
return 'record', self.visit(ctx.expr())
elif ctx.REST():
return 'rest',
def visitReport(self, ctx):
return make_func_code('vfpfunc.report_form', self.visit(ctx.specialExpr()))
def visitSetCmd(self, ctx):
setword = ctx.setword.text.lower()
kwargs = {'set_value': True}
args = ()
if ctx.BAR():
setword += ' bar'
if setword == 'printer':
if ctx.TO():
if ctx.DEFAULT():
kwargs.update({'Default': True})
elif ctx.NAME():
kwargs.update({'Name': self.visit(ctx.specialExpr()[0])})
elif ctx.specialExpr():
kwargs.update({'File': self.visit(ctx.specialExpr()[0])})
if ctx.ADDITIVE():
kwargs.update({'additive': True})
else:
args = ('ON' if ctx.ON() else 'OFF',)
kwargs.update({'prompt': True} if ctx.PROMPT() else {})
elif setword == 'typeahead':
args = (self.visit(ctx.expr()[0]),)
elif setword == 'procedure':
kwargs.update({'additive': True} if ctx.ADDITIVE() else {})
args = self.list_visit(ctx.specialExpr())
elif setword == 'bell':
args = ('TO', self.visit(ctx.specialExpr()[0])) if ctx.TO() else ('ON' if ctx.ON() else 'OFF',)
elif setword in ('cursor', 'deleted', 'escape', 'exact', 'exclusive', 'multilocks', 'near', 'status', 'status bar', 'tableprompt', 'talk', 'unique'):
args = ('ON' if ctx.ON() else 'OFF',)
elif setword == 'century':
if ctx.TO():
if len(ctx.expr()) > 0:
kwargs.update({'century': self.visit(ctx.expr()[0])})
else:
kwargs.update({'century': 19})
if len(ctx.expr()) > 1:
kwargs.update({'rollover': self.visit(ctx.expr()[1])})
else:
kwargs.update({'rollover': 67})
else:
args = ('ON' if ctx.ON() else 'OFF',)
elif setword == 'classlib':
args = (self.visit(ctx.specialExpr(0)),)
if ctx.IN():
kwargs['class_file'] = self.visit(ctx.specialExpr(1))
if ctx.ALIAS():
kwargs['alias'] = self.visit(ctx.specialExpr(2 if ctx.IN() else 1))
if ctx.ADDITIVE():
kwargs['additive'] = True
elif setword == 'compatible':
args = ('ON' if ctx.ON() or ctx.DB4() else 'OFF',)
if ctx.PROMPT() or ctx.NOPROMPT():
args = (args[0], 'PROMPT' if ctx.PROMPT() else 'NOPROMPT')
elif setword == 'sysmenu':
args = [x.symbol.text.lower() for x in (ctx.ON(), ctx.OFF(), ctx.TO(), ctx.SAVE(), ctx.NOSAVE()) if x]
if ctx.expr():
args += [self.visit(ctx.expr()[0])]
elif ctx.DEFAULT():
args += ['default']
elif setword == 'date':
args = (str(self.visit_with_disabled_scope(ctx.identifier())),)
elif setword == 'refresh':
args = self.list_visit(ctx.expr())
if len(args) < 2:
args.append(5)
elif setword == 'notify':
arg = 'ON' if ctx.ON() else 'OFF'
if ctx.CURSOR():
kwargs.update({'cursor': arg})
else:
args = (arg,)
elif setword == 'clock':
args = [x.symbol.text.lower() for x in (ctx.ON(), ctx.OFF(), ctx.TO(), ctx.STATUS()) if x]
if ctx.expr():
args += self.list_visit(ctx.expr())
elif setword == 'memowidth':
args = (self.visit(ctx.expr()[0]),)
elif setword == 'library':
kwargs.update({'additive': True} if ctx.ADDITIVE() else {})
args = self.list_visit(ctx.specialExpr())
elif setword == 'filter':
args = self.list_visit(ctx.specialExpr())
elif setword == 'order':
order = self.visit(ctx.specialExpr(0))
of_expr = self.visit(ctx.ofExpr)
in_expr = self.visit(ctx.inExpr)
kwargs.update({'descending': True} if ctx.DESCENDING() else {})
kwargs.update({'tag': True} if ctx.TAG() else {})
args = (order, of_expr, in_expr)
elif setword == 'index':
args = (self.visit(ctx.specialExpr(0)),)
elif setword == 'udfparms':
args = ['value' if ctx.VALUE() else 'reference']
else:
return
return make_func_code('vfpfunc.set', setword, *args, **kwargs)
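    # Illustrative sketch (not part of the original source): a statement such
    # as `SET DELETED ON` falls into the plain ON/OFF branch above and would
    # translate to roughly `vfpfunc.set('deleted', 'ON', set_value=True)`,
    # while unrecognized SET subcommands fall through to the bare `return`
    # and are dropped from the output.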
def visitPush(self, ctx):
pass
def visitPop(self, ctx):
pass
def visitShellRun(self, ctx):
start, stop = ctx.getSourceInterval()
if ctx.identifier():
pass #Add /N options
start = ctx.identifier().getSourceInterval()[0]
tokens = ctx.parser._input.tokens[start + 1:stop + 1]
# FIXME: Need more cleanup on the arguments.
command = ''.join(create_string(tok.text) for tok in tokens).strip().split()
for i, arg in enumerate(command):
if arg.startswith('&'):
command[i] = CodeStr(arg[1:])
self.imports.append('import subprocess')
return make_func_code('subprocess.call', command)
def visitReturnStmt(self, ctx):
if not ctx.expr():
return [CodeStr('return')]
return [add_args_to_code('return {}', [self.visit(ctx.expr())])]
def visitAssert(self, ctx):
if ctx.expr(1):
return add_args_to_code('assert {}, {}', (self.visit(ctx.expr(0)), self.visit(ctx.expr(1))))
else:
return add_args_to_code('assert {}', (self.visit(ctx.expr(0)),))
def visitListStmt(self, ctx):
pass
def visitSaveToCmd(self, ctx):
pass
def visitUnlockCmd(self, ctx):
pass
def visitCompileCmd(self, ctx):
pass
def visitSortCmd(self, ctx):
pass
def visitCopyToArray(self, ctx):
pass
def visitRestoreCmd(self, ctx):
pass
def visitZoomCmd(self, ctx):
pass
def visitTextBlock(self, ctx):
kwargs = {}
if ctx.NOSHOW():
kwargs['show'] = False
text = self.visit(ctx.textChunk())
val = make_func_code('vfpfunc.text', text, **kwargs)
| |
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import random
import numpy as np
import networkx as nx
from math import sqrt
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error, mean_squared_error
def encode_classes(col):
"""
Input: categorical vector of any type
Output: categorical vector of int in range 0-num_classes
"""
classes = set(col)
classes_dict = {c: i for i, c in enumerate(classes)}
labels = np.array(list(map(classes_dict.get, col)), dtype=np.int32)
return labels
def onehot_classes(col):
"""
Input: categorical vector of int in range 0-num_classes
Output: one-hot representation of the input vector
"""
col2onehot = np.zeros((col.size, col.max() + 1), dtype=float)
col2onehot[np.arange(col.size), col] = 1
return col2onehot
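# Hypothetical usage sketch (not in the original module):
#   onehot_classes(np.array([0, 2, 1]))
# returns
#   array([[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 1., 0.]])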
def miss_deg_num(nxg, mask):
"""
Input: networkx graph, mask representing missing node features
Outputs:
vector containing range of missing node features (sorted)
vector containing mean degree for each value in the range
vector containing the number of nodes in the graph for each
value in the range
"""
list_missingness = list(mask.shape[1] - mask.sum(axis=1))
t = []
for i, j in zip(list_missingness, nxg.degree):
t.append((int(i), j[1]))
d_deg = dict(
set(
(a, sum(y for x, y in t if x == a) / sum(1 for x, _ in t if x == a))
for a, b in t
)
)
d_num = dict((i, list_missingness.count(i)) for i in set(list_missingness))
sk = np.sort(list(d_deg.keys()))
deg = [d_deg[i] for i in sk]
num = [d_num[i] for i in sk]
return sk, deg, num
def degrade_dataset(X, missingness, rand, v):
"""
Inputs:
dataset to corrupt
        missingness: fraction of entries to eliminate, in [0, 1]
        rand: random seed
        v: value used to replace the removed entries (e.g. 0.0 or np.nan)
Outputs:
corrupted Dataset
binary mask
"""
X_1d = X.flatten()
n = len(X_1d)
mask_1d = np.ones(n)
# my modification
random.seed(rand)
corrupt_ids = random.sample(range(n), int(missingness * n))
for i in corrupt_ids:
X_1d[i] = v
mask_1d[i] = 0
cX = X_1d.reshape(X.shape)
mask = mask_1d.reshape(X.shape)
return cX, mask
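# Minimal sketch of how the corruption step might be exercised; the helper
# below is hypothetical and not part of the original pipeline.
def _demo_degrade_dataset():
    # Corrupt 25% of a toy 4x3 matrix, replacing the dropped entries with NaN.
    X = np.arange(12, dtype=float).reshape(4, 3)
    cX, mask = degrade_dataset(X, missingness=0.25, rand=0, v=np.nan)
    # Exactly int(0.25 * 12) = 3 entries are flagged as missing in the mask.
    assert int(mask.sum()) == X.size - int(0.25 * X.size)
    return cX, mask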
def data2onehot(data, mask, num_cols, cat_cols):
"""
Inputs:
corrupted dataset
mask of the corruption
        vector containing indexes of columns having numerical values
        vector containing indexes of columns having categorical values
Outputs:
one-hot encoding of the dataset
one-hot encoding of the corruption mask
mask of the numerical entries of the one-hot dataset
mask of the categorical entries of the one-hot dataset
vector containing start-end idx for each categorical variable
"""
# find most frequent class
fill_with = []
for col in cat_cols:
l = list(data[:, col])
fill_with.append(max(set(l), key=l.count))
    # mode (categorical) / median (numerical) imputation for the missing entries
filled_data = data.copy()
for i, col in enumerate(cat_cols):
filled_data[:, col] = np.where(mask[:, col], filled_data[:, col], fill_with[i])
for i, col in enumerate(num_cols):
filled_data[:, col] = np.where(
mask[:, col], filled_data[:, col], np.nanmedian(data[:, col])
)
# encode into 0-N lables
for col in cat_cols:
filled_data[:, col] = encode_classes(filled_data[:, col])
num_data = filled_data[:, num_cols]
num_mask = mask[:, num_cols]
cat_data = filled_data[:, cat_cols]
cat_mask = mask[:, cat_cols]
# onehot encoding for masks and categorical variables
onehot_cat = []
cat_masks = []
for j in range(cat_data.shape[1]):
col = cat_data[:, j].astype(int)
col2onehot = np.zeros((col.size, col.max() + 1), dtype=float)
col2onehot[np.arange(col.size), col] = 1
mask2onehot = np.zeros((col.size, col.max() + 1), dtype=float)
for i in range(cat_data.shape[0]):
if cat_mask[i, j] > 0:
mask2onehot[i, :] = 1
else:
mask2onehot[i, :] = 0
onehot_cat.append(col2onehot)
cat_masks.append(mask2onehot)
# onehot_cat[0].shape=[303,x]
cat_starting_col = []
oh_data = num_data
oh_mask = num_mask
# build the big mask
for i in range(len(onehot_cat)):
cat_starting_col.append(oh_mask.shape[1])
oh_data = np.c_[oh_data, onehot_cat[i]]
oh_mask = np.c_[oh_mask, cat_masks[i]]
# oh_data.shape: 303,32
oh_num_mask = np.zeros(oh_data.shape)
oh_cat_mask = np.zeros(oh_data.shape)
# build numerical mask
oh_num_mask[:, range(num_data.shape[1])] = num_mask
# build categorical mask
oh_cat_cols = []
for i in range(len(cat_masks)):
start = cat_starting_col[i]
finish = start + cat_masks[i].shape[1]
oh_cat_mask[:, start:finish] = cat_masks[i]
oh_cat_cols.append((start, finish))
return oh_data, oh_mask, oh_num_mask, oh_cat_mask, oh_cat_cols
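# Hypothetical call sketch (not in the original module): for a table whose
# columns 0-1 are numerical and column 2 is categorical,
#   oh_data, oh_mask, oh_num_mask, oh_cat_mask, oh_cat_cols = \
#       data2onehot(data, mask, num_cols=[0, 1], cat_cols=[2])
# places the numerical columns first, followed by one one-hot block per
# categorical column, with oh_cat_cols holding each block's (start, end) span.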
def preprocess(data, mask, num_cols, cat_cols):
a, b, c, d, e = data2onehot(data, mask, num_cols, cat_cols)
    l = [a, b, c, d, e]
return l
def similarity(x, mx, y, my, metric, weight_missingness):
"""
    Inputs:
        x: feature vector
        mx: mask of the feature vector x
        y: feature vector
        my: mask of the feature vector y
        metric: "cosine" or "euclidean" similarity
        weight_missingness: boolean, whether to down-weight the score when few
        elements are observed in both vectors
    Output:
        similarity score of the two feature vectors
"""
xy_to_keep = np.multiply(mx, my)
if np.sum(xy_to_keep) < 1.0:
return 0.0
# keep elements present in both vectors
rx = np.multiply(x, xy_to_keep)
ry = np.multiply(y, xy_to_keep)
if metric == "cosine":
dot = np.dot(rx, ry)
den = np.linalg.norm(rx) * np.linalg.norm(ry)
sim = float(dot / max(den, 1e-6))
elif metric == "euclidean":
sim = 1 / (1 + np.linalg.norm(rx - ry))
if weight_missingness:
# compute weighting factor
ones = np.ones(xy_to_keep.shape)
wdot = np.dot(xy_to_keep, ones)
wden = np.linalg.norm(xy_to_keep) * np.linalg.norm(ones)
sim *= float(wdot / wden)
return sim
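# Worked sketch (not in the original module): with
#   x = [1, 0, 2], mx = [1, 0, 1]   and   y = [1, 1, 2], my = [1, 1, 1]
# only positions 0 and 2 are observed in both vectors, so the cosine branch
# compares [1, 0, 2] with itself and yields 1.0; with weight_missingness=True
# the score is further scaled by sqrt(2/3) since only 2 of 3 features overlap.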
def similarity_matrix(X, mask, metric, weight_missingness):
"""
Inputs:
corrupted dataset
mask of the corruption
        metric, cosine similarity or euclidean similarity
        boolean, weight or not the missing elements on the feature vector
Output:
matrix containing pairwise similarity
"""
obs_len = len(X[:, 0])
M_cos_sim = np.zeros((obs_len, obs_len), dtype=float)
for i in range(0, obs_len):
for j in range(i, obs_len):
M_cos_sim[i][j] = similarity(
X[i], mask[i], X[j], mask[j], metric, weight_missingness
)
M_cos_sim[j][i] = M_cos_sim[i][j]
return M_cos_sim
def compute_weighted_adj(M, percentile):
"""
Inputs:
similarity matrix
percentile of connections to keep
Output:
weighted adjacency matrix
"""
if False:
m_len = len(M[0, :])
m_sample = []
for i in range(0, m_len):
for j in range(i, m_len):
if i != j:
m_sample.append(M[i][j])
treshold = np.percentile(m_sample, percentile)
M[M < treshold] = 0.0
np.fill_diagonal(M, 0.0)
else:
for i in range(M.shape[0]):
# first pruning
treshold = np.percentile(M[i], percentile)
M[i][M[i] < treshold] = 0.0
# second pruning
v = M.flatten()
treshold = np.percentile(v, percentile)
M[M < treshold] = 0.0
np.fill_diagonal(M, 0.0)
        # set the diagonal elements to 0
return M
def dataset2nxg(cX, mask, percentile, metric, weight_missingness):
"""
Inputs:
corrupted dataset
mask of the corruption
percentile of connections to keep
        metric, cosine similarity or euclidean similarity
        boolean, weight or not the missing elements on the feature vector
Outputs:
networkx MultiDiGraph
"""
cX_sim = similarity_matrix(cX, mask, metric, weight_missingness)
    # symmetric similarity matrix
cX_wadj = compute_weighted_adj(cX_sim, percentile)
ngx = nx.DiGraph(cX_wadj)
return ngx
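# Typical chaining sketch (assumed, not from the original code):
#   cX, mask = degrade_dataset(X, 0.2, rand=0, v=0.0)
#   nxg = dataset2nxg(cX, mask, percentile=95, metric="cosine",
#                     weight_missingness=False)
#   print(nxg.number_of_nodes(), nxg.number_of_edges())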
def new_edges(x_tr, mask_tr, x_te, mask_te, percentile, metric, weight_missingness):
"""
Inputs:
old dataset
old dataset mask of the corruption
new dataset
new dataset mask of the corruption
percentile of connections to keep
        metric, cosine similarity or euclidean similarity
        boolean, weight or not the missing elements on the feature vector
Output:
list containg [src, dest] of edges that needs to be added in order
to integrate the new dataset in the older one
"""
M_sim = np.zeros((x_te.shape[0], np.r_[x_tr, x_te].shape[0]))
for i in range(x_te.shape[0]):
for j in range(x_tr.shape[0]):
M_sim[i][j] = similarity(
x_tr[j], mask_tr[j], x_te[i], mask_te[i], metric, weight_missingness
)
for i in range(x_te.shape[0]):
for j in range(0, i):
M_sim[i][x_tr.shape[0] + j] = similarity(
x_te[i], mask_te[i], x_te[j], mask_te[j], metric, weight_missingness
)
if False:
# One step pruning
treshold = np.percentile(M_sim.flatten(), percentile)
edges = np.argwhere(M_sim >= treshold)
else:
# Two steps pruning
for i in range(M_sim.shape[0]):
treshold = np.percentile(M_sim[i], percentile)
M_sim[i][M_sim[i] < treshold] = 0.0
treshold = np.percentile(M_sim.flatten(), percentile)
M_sim[M_sim < treshold] = 0.0
edges = np.argwhere(M_sim > 0.0)
return edges
def imputation_accuracy(target, output, inv_mask):
"""
Inputs:
target dataset
imputed dataset
inverse of the matrix used for the dataset corruption
Outputs:
mean absolute error
mean root mean squared error
"""
mae = mean_absolute_error(target[inv_mask], output[inv_mask])
rmse = sqrt(mean_squared_error(target[inv_mask], output[inv_mask]))
return mae, rmse
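# Evaluation sketch (assumed, not from the original code): with `mask` equal to
# 1 on observed entries, the corrupted positions are scored via
#   mae, rmse = imputation_accuracy(X_true, X_imputed, inv_mask=(mask == 0))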
def proper_onehot(x, oh_cat_cols):
"""
Inputs:
dataset with soft categorical variables
vector containing start-end idx for each categorical variable
Output: dataset with hard categorical variables
"""
for start, finish in oh_cat_cols:
x[:, start:finish] = (
x[:, start:finish] == x[:, start:finish].max(1)[:, None]
).astype(int)
return x
def batch_mask(size, batch_size):
"""
Inputs:
        length of
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
d08=pd.read_csv("1902HackDATA/IPL2008CSV.csv")
#***Basic/General/Normal Information
d08.head()
d08.dtypes
d08.info()
d08.describe()
# In[3]:
d08
# In[4]:
d08['Strike Rate1']=None
d08['Economy1']=None
# In[5]:
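# The Strike Rate / Economy columns are read as text and apparently contain
# non-numeric placeholders for players who did not bat or bowl, so coerce each
# value to float and fall back to 0.0 when the conversion fails.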
for i in range(d08.shape[0]):
try:
d08['Strike Rate1'][i]=float(d08['Strike Rate'][i])
except ValueError:
d08['Strike Rate1'][i]=0.0
try:
d08['Economy1'][i]=float(d08['Economy'][i])
except ValueError:
d08['Economy1'][i]=0.0
# In[6]:
pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',100)
#pd.reset_option('display.max_rows')
#pd.reset_option('display.max_columns')
#d08.isnull().any()
d08
# In[7]:
for x in d08['Strike Rate1']:
if(x<0):
print('True')
print('No_one')
for x in d08['Economy1']:
if(x<0):
print('True1')
print('No_one1')
# In[8]:
d08.isnull().any()
# In[9]:
print([x for x in d08])
# In[10]:
for x in ['Runs', 'Balls', 'Balls Bowled', 'Runs Conceded', 'Wickets', 'Ct_St',
'Run Outs', 'Matches Played ', 'Age', 'Strike Rate1', 'Economy1']:
print(x, min(d08[x]), max(d08[x]))
# In[11]:
pdtr=['Year','RunMi','RunMx','BallMi','BallMx',"4'sMi","4'sMx","6'sMi","6'sMx",'Highest Run Scored Mi','Highest Run Scored Mx','Balls Bowled Mi',
'Balls Bowled Mx','Runs Conceded Mi','Runs Conceded Mx','WicketsMi','WicketsMx','Ct_StMi','Ct_StMx','Run Outs Mi', 'Run Outs Mx','Matches Played Mi',
'Matches Played Mx','Strike Rate1 Mi','Strike Rate1 Mx','Economy1Mi','Economy1Mx']
# In[12]:
len(d08['Player'].unique())
# In[13]:
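# PdtrCSV.csv collects, per season, min/max bounds (the *Mi/*Mx columns listed
# in `pdtr`) for every statistic; one row of values is appended after
# inspecting each year's scatter plots below. Reading these bounds off the
# plots by hand is an assumption inferred from the hard-coded values written
# later in this notebook.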
fo=open('1902HackDATA/PdtrCSV.csv', 'w+')
fo.write('Sl.No')
for a in pdtr:
fo.write(',')
fo.write(a)
fo.write('\n')
# In[14]:
from matplotlib.lines import Line2D
clr=['blue','red']
stl=[Line2D([0],[0], color=c, linewidth=3, linestyle='--') for c in clr]
plt.scatter(d08['Runs'],d08['Strike Rate1'],c='blue')
plt.scatter(d08['Runs'],d08['Balls'],c='red')
lbl=['RunVsStrike Rate', 'RunVsBalls']
plt.legend(stl, lbl)
plt.ylim((0,400))
plt.xlabel('Run',fontsize=16)
plt.ylabel('StrikeRate/Balls', fontsize=16)
plt.title('Scatter Visulization',fontsize=16)
# In[15]:
plt.scatter(d08['Matches Played '],d08['Strike Rate1'],c='blue')
plt.xlabel('Matches Played', fontsize=16)
plt.ylabel('Strike Rate', fontsize=16)
plt.title('Scatter Visulization',fontsize=16)
# In[16]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d08['Runs Conceded'],d08['Economy1'],c='green')
plt.scatter(d08['Balls Bowled'],d08['Economy1'],c='yellow')
plt.scatter(d08['Wickets'],d08['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('Economy',fontsize=16)
plt.ylabel('RunsConceded/BallsBowled/Wickets', fontsize=12)
plt.title('Scatter Visulization',fontsize=16)
# In[17]:
plt.scatter(d08['Matches Played '], d08['Economy1'],c='blue')
plt.xlabel('Matches Played')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=16)
# In[18]:
lbl=['Ct_St','Run Outs']
plt.scatter(d08['Matches Played '],d08['Ct_St'],c='red')
plt.scatter(d08['Matches Played '],d08['Run Outs'],c='pink')
plt.legend(lbl)
plt.xlabel('Matches Played')
plt.ylabel('Ct_St/Run Outs')
plt.title('Scatter Visulization',fontsize=14)
# In[19]:
val=['2008','0','410','0','300','0' ,'0' ,'0' ,'0' ,'12' ,'48','15','310',
'10','450','0','9','2','8','0','0','1','14','80','160','6','13']
fo.write('1')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[20]:
d09=pd.read_csv("1902HackDATA/IPL2009CSV.csv")
#***Basic/General/Normal Information
d09.head()
d09.dtypes
d09.info()
d09.describe()
# In[21]:
d09
# In[22]:
d09.shape
# In[23]:
d09.isnull().any()
# In[24]:
d09['Strike Rate1']=None
d09['Economy1']=None
# In[25]:
for i in range(d09.shape[0]):
try:
d09['Strike Rate1'][i]=float(d09['Strike Rate'][i])
except ValueError:
d09['Strike Rate1'][i]=0.0
try:
d09['Economy1'][i]=float(d09['Economy'][i])
except ValueError:
d09['Economy1'][i]=0.0
# In[26]:
pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',100)
#pd.reset_option('display.max_rows')
#pd.reset_option('display.max_columns')
#d09.isnull().any()
d09
# In[27]:
d09.isnull().any()
# In[28]:
len(d09['Player'].unique())
# In[29]:
lbl=['Strike Rate1', 'Balls']
plt.scatter(d09['Runs'],d09['Strike Rate1'],c='blue')
plt.scatter(d09['Runs'],d09['Balls'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel("StrikeRate/Balls")
plt.title('Scatter Visulization',fontsize=14)
# In[30]:
plt.scatter(d09['Matches Played'],d09['Strike Rate1'],c='blue')
plt.xlabel('Matches Played')
plt.ylabel('Strike Rate')
plt.title('Scatter Visulization',fontsize=14)
# In[31]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d09['Runs Conceded'],d09['Economy1'],c='green')
plt.scatter(d09['Balls Bowled'],d09['Economy1'],c='yellow')
plt.scatter(d09['Wickets'],d09['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConceded/BallsBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[32]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d09['Matches Played'], d09['Economy1'],c='blue')
plt.scatter(d09['Wickets'],d09['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[33]:
lbl=['Ct_St','Run Outs']
plt.scatter(d09['Matches Played'],d09['Ct_St'],c='red')
plt.scatter(d09['Matches Played'],d09['Run Outs'],c='pink')
plt.legend(lbl)
plt.xlabel('Matches Played')
plt.ylabel('Ct_St/RunOuts')
plt.title('Scatter Visulization',fontsize=14)
# In[34]:
val=['2009','0','390','0','300','0' ,'0' ,'0' ,'0' ,'12' ,'48','5','300','4','400',
'0','15','2.5','10','0','0','1','16','90','150','5','10']
fo.write('2')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[35]:
d10=pd.read_csv("1902HackDATA/IPL2010CSV.csv")
#***Basic/General/Normal Information
d10.head()
d10.dtypes
d10.info()
d10.describe()
# In[36]:
d10
# In[37]:
d10.isnull().any()
# In[38]:
artt=[x for x in d10]
print(artt)
# In[39]:
d10['Strike Rate1']=None
d10['Economy1']=None
# In[40]:
martt=['Runs', 'Balls', 'Four', 'Six', 'Strike Rate', 'Balls Bowled', 'Runs Conceded',
'Economy', 'Wickets']
for x in martt:
d10[x].fillna(0, inplace=True)
# In[41]:
d10.isnull().any()
# In[42]:
for i in range(d10.shape[0]):
try:
d10['Strike Rate1'][i]=float(d10['Strike Rate'][i])
except ValueError:
d10['Strike Rate1'][i]=0.0
try:
d10['Economy1'][i]=float(d10['Economy'][i])
except ValueError:
d10['Economy1'][i]=0.0
# In[43]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
d10.isnull().any()
d10
# In[44]:
print([x for x in d10], '\n')
print(len(d10['Player Name'].unique()))
# In[45]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d10['Runs'],d10['Strike Rate1'],c='blue')
plt.scatter(d10['Runs'],d10['Balls'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Balls')
plt.title('Scatter Visulization',fontsize=14)
# In[46]:
lbl=['Strike Rate','Run Outs','Four', 'Six']
plt.scatter(d10['Matches Played'],d10['Strike Rate1'],c='blue')
plt.scatter(d10['Matches Played'],d10['Run Outs'],c='black')
plt.scatter(d10['Matches Played'],d10['Four'],c='green')
plt.scatter(d10['Matches Played'],d10['Six'],c='red')
plt.legend(lbl)
plt.xlabel('Matches Played')
plt.ylabel('StrikeRate/Run Outs/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[47]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d10['Runs Conceded'],d10['Economy1'],c='green')
plt.scatter(d10['Balls Bowled'],d10['Economy1'],c='yellow')
plt.scatter(d10['Wickets'],d10['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConceded/BallsBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[48]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d10['Matches Played'], d10['Economy1'],c='blue')
plt.scatter(d10['Wickets'],d10['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[49]:
lbl=['Ct_St','Run Outs','Four', 'Six']
plt.scatter(d10['Matches Played'],d10['ct_st'],c='red')
plt.scatter(d10['Matches Played'],d10['Run Outs'],c='black')
plt.scatter(d10['Matches Played'],d10['Four'],c='blue')
plt.scatter(d10['Matches Played'],d10['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('ct_st/Run Outs/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[50]:
val=['2010','0','450','0','380','10' ,'50' ,'0' ,'15' ,'12' ,'50','6','300','10','400',
'0','15','5','15','0','2.5','1','16','120','190','7','10']
fo.write('3')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[51]:
d11=pd.read_csv("1902HackDATA/IPL2011CSV.csv")
#***Basic/General/Normal Information
d11.head()
d11.dtypes
d11.info()
d11.describe()
# In[52]:
d11
# In[53]:
d11.isnull().any()
# In[54]:
print([x for x in d11], '\n', len(d09['Player'].unique()))
# In[55]:
martt11=['Runs', 'Balls ', 'Strike Rate', 'Four', 'Six', 'Balls Bowled', 'Runs Conceded',
'Wicket', 'Economy']
for x in martt11:
d11[x].fillna(0, inplace=True)
# In[56]:
d11['Strike Rate1']=None
d11['Economy1']=None
# In[57]:
for i in range(d11.shape[0]):
try:
d11['Strike Rate1'][i]=float(d11['Strike Rate'][i])
except ValueError:
d11['Strike Rate1'][i]=0.0
try:
d11['Economy1'][i]=float(d11['Economy'][i])
except ValueError:
d11['Economy1'][i]=0.0
# In[58]:
d11.isnull().any()
# In[59]:
pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',100)
#pd.reset_option('display.max_rows')
#pd.reset_option('display.max_columns')
#d11.isnull().any()
d11
# In[60]:
print([x for x in d11], '\n')
print(len(d11['Player Name'].unique()))
# In[61]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d11['Runs'],d11['Strike Rate1'],c='blue')
plt.scatter(d11['Runs'],d11['Balls '],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Balls')
plt.title('Scatter Visulization',fontsize=14)
# In[62]:
lbl=['Strike Rate','Run Outs','Four', 'Six']
plt.scatter(d11['Matches Played'],d11['Strike Rate1'],c='blue')
plt.scatter(d11['Matches Played'],d11['Run Outs'],c='black')
plt.scatter(d11['Matches Played'],d11['Four'],c='green')
plt.scatter(d11['Matches Played'],d11['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[63]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d11['Runs Conceded'],d11['Economy1'],c='green')
plt.scatter(d11['Balls Bowled'],d11['Economy1'],c='yellow')
plt.scatter(d11['Wicket'],d11['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConcede/BallsBowled/Wicket')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[64]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d11['Matches Played'], d11['Economy1'],c='blue')
plt.scatter(d11['Wicket'],d11['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wicket')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[65]:
lbl=['Ct_St','Run Outs','Four', 'Six']
plt.scatter(d11['Matches Played'],d11['ct_st'],c='red')
plt.scatter(d11['Matches Played'],d11['Run Outs'],c='black')
plt.scatter(d11['Matches Played'],d11['Four'],c='blue')
plt.scatter(d11['Matches Played'],d11['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('ct_st/Run Outs/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[66]:
val=['2011','0','480','0','400','20' ,'55' ,'5' ,'20' ,'12' ,'50','12','300','10','400','4',
'15','8','20','0','2.5','1','16','130','200','7','12']
fo.write('4')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[67]:
d12=pd.read_csv("1902HackDATA/IPL2012CSV.csv")
#***Basic/General/Normal Information
d12.head()
d12.dtypes
d12.info()
d12.describe()
# In[68]:
d12.isnull().any()
# In[69]:
d12['Strike Rate1']=None
d12['Economy1']=None
# In[70]:
for i in range(d12.shape[0]):
try:
d12['Strike Rate1'][i]=float(d12['Strike Rate'][i])
except ValueError:
d12['Strike Rate1'][i]=0.0
try:
d12['Economy1'][i]=float(d12['Economy'][i])
except ValueError:
d12['Economy1'][i]=0.0
# In[71]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
d12
# In[72]:
#d12.isnull().any()
print([x for x in d12], '\n')
print(len(d12["Player's Name"].unique()))
# In[73]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d12['Run'],d12['Strike Rate1'],c='blue')
plt.scatter(d12['Run'],d12['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[74]:
lbl=['Strike Rate','Four', 'Six']
plt.scatter(d12['Matches Played'],d12['Strike Rate1'],c='blue')
plt.scatter(d12['Matches Played'],d12['Four'],c='green')
plt.scatter(d12['Matches Played'],d12['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[75]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d12['Runs Concede'],d12['Economy1'],c='green')
plt.scatter(d12['Balls Bowled'],d12['Economy1'],c='yellow')
plt.scatter(d12['Wickets'],d12['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConcede/BallsBowled/Wicket')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[76]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d12['Matches Played'], d12['Economy1'],c='blue')
plt.scatter(d12['Wickets'],d12['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[77]:
lbl=['Ct_St','Four', 'Six']
plt.scatter(d12['Matches Played'],d12['Ct_St'],c='red')
plt.scatter(d12['Matches Played'],d12['Four'],c='blue')
plt.scatter(d12['Matches Played'],d12['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[78]:
val=['2012','0','495','0','480','25' ,'60' ,'3' ,'10' ,'0' ,'0','12','350','20','380','5',
'18','8','10','0','2','1','17','130','170','7.1','9']
fo.write('5')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[79]:
d13=pd.read_csv("1902HackDATA/IPL2013CSV.csv")
#***Basic/General/Normal Information
d13.head()
d13.dtypes
d13.info()
d13.describe()
# In[80]:
#d13
d13.isnull().any()
# In[81]:
d13['Strike Rate1']=None
d13['Economy1']=None
# In[82]:
for i in range(d13.shape[0]):
try:
d13['Strike Rate1'][i]=float(d13['Strike Rate'][i])
except ValueError:
d13['Strike Rate1'][i]=0.0
try:
d13['Economy1'][i]=float(d13['Economy'][i])
except ValueError:
d13['Economy1'][i]=0.0
# In[83]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d13.isnull().any()
d13
# In[84]:
print([x for x in d13], '\n')
print(len(d13["Player's Name"].unique()))
# In[85]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d13['Run'],d13['Strike Rate1'],c='blue')
plt.scatter(d13['Run'],d13['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[86]:
lbl=['Balls','Strike Rate']
plt.scatter(d13['Highest Run Scored'],d13['Ball'],c='red')
plt.scatter(d13['Highest Run Scored'],d13['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/Strike Rate')
plt.title('Scatter Visulization',fontsize=14)
# In[87]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d13['Matches Played'],d13['Strike Rate1'],c='blue')
plt.scatter(d13['Matches Played'],d13['Run Outs'],c='black')
plt.scatter(d13['Matches Played'],d13['Four'],c='green')
plt.scatter(d13['Matches Played'],d13['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[88]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d13['RunsConceded'],d13['Economy1'],c='green')
plt.scatter(d13['BallsBowled'],d13['Economy1'],c='yellow')
plt.scatter(d13['Wickets'],d13['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConcede/BallsBowled/Wicket')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[89]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d13['Matches Played'], d13['Economy1'],c='blue')
plt.scatter(d13['Wickets'],d13['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[90]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d13['Matches Played'],d13['Ct_St'],c='red')
plt.scatter(d13['Matches Played'],d13['Run Outs'],c='black')
plt.scatter(d13['Matches Played'],d13['Four'],c='blue')
plt.scatter(d13['Matches Played'],d13['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[91]:
val=['2013','0','540','0','450','28' ,'65' ,'10' ,'20' ,'20' ,'100','12','405',
'10','490','7','11','10','20','0','2.5','1','18','130','180','7.5','10.5']
fo.write('6')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[92]:
d14=pd.read_csv("1902HackDATA/IPL2014CSV.csv")
#***Basic/General/Normal Information
d14.head()
d14.dtypes
d14.info()
d14.describe()
# In[93]:
#d14
d14.isnull().any()
# In[94]:
d14['Strike Rate1']=None
d14['Economy1']=None
# In[95]:
for i in range(d14.shape[0]):
try:
d14['Strike Rate1'][i]=float(d14['Strike Rate'][i])
except ValueError:
d14['Strike Rate1'][i]=0.0
try:
d14['Economy1'][i]=float(d14['Economy'][i])
except ValueError:
d14['Economy1'][i]=0.0
# In[96]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d14.isnull().any()
d14
# In[97]:
print([x for x in d14], '\n')
print(len(d14['Players Name'].unique()))
# In[98]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d14['Run'],d14['Strike Rate1'],c='blue')
plt.scatter(d14['Run'],d14['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[99]:
lbl=['Balls','Strike Rate']
plt.scatter(d14['Highest Run Scored'],d14['Ball'],c='red')
plt.scatter(d14['Highest Run Scored'],d14['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/StrikeRate')
plt.title('Scatter Visulization',fontsize=14)
# In[100]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d14['Matches Played'],d14['Strike Rate1'],c='blue')
plt.scatter(d14['Matches Played'],d14['Run Outs'],c='black')
plt.scatter(d14['Matches Played'],d14['Four'],c='green')
plt.scatter(d14['Matches Played'],d14['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[101]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d14['Runs Scored'],d14['Economy1'],c='green')
plt.scatter(d14['Balls Bowled'],d14['Economy1'],c='yellow')
plt.scatter(d14['Wickets'],d14['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConceded/BallsBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[102]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d14['Matches Played'], d14['Economy1'],c='blue')
plt.scatter(d14['Wickets'],d14['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[103]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d14['Matches Played'],d14['Ct_St'],c='red')
plt.scatter(d14['Matches Played'],d14['Run Outs'],c='black')
plt.scatter(d14['Matches Played'],d14['Four'],c='blue')
plt.scatter(d14['Matches Played'],d14['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[104]:
val=['2014','10','450','20','390','25' ,'50' ,'5' ,'24' ,'10' ,'100','12','390','20',
'490','2','19','8','18','0','2.5','1','16','100','150','7','12']
fo.write('7')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[105]:
d15=pd.read_csv("1902HackDATA/IPL2015CSV.csv")
#***Basic/General/Normal Information
d15.head()
d15.dtypes
d15.info()
d15.describe()
# In[106]:
d15.isnull().any()
# In[107]:
d15['Strike Rate1']=None
d15['Economy1']=None
# In[108]:
for i in range(d15.shape[0]):
try:
d15['Strike Rate1'][i]=float(d15['Strike Rate'][i])
except ValueError:
d15['Strike Rate1'][i]=0.0
try:
d15['Economy1'][i]=float(d15['Economy'][i])
except ValueError:
d15['Economy1'][i]=0.0
# In[109]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d14.isnull().any()
d15
# In[110]:
print([x for x in d15], '\n')
print(len(d15["Player's Name"].unique()))
# In[111]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d15['Run'],d15['Strike Rate1'],c='blue')
plt.scatter(d15['Run'],d15['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[112]:
lbl=['Balls','Strike Rate']
plt.scatter(d15['Highest Run Scored'],d15['Ball'],c='red')
plt.scatter(d15['Highest Run Scored'],d15['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/StrikeRate')
plt.title('Scatter Visulization',fontsize=14)
# In[113]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d15['Matches Played'],d15['Strike Rate1'],c='blue')
plt.scatter(d15['Matches Played'],d15['Run Outs'],c='black')
plt.scatter(d15['Matches Played'],d15['Four'],c='green')
plt.scatter(d15['Matches Played'],d15['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[114]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d15['Runs Conceded'],d15['Economy1'],c='green')
plt.scatter(d15['Balls Bowled'],d15['Economy1'],c='yellow')
plt.scatter(d15['Wickets'],d15['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConceded/BallsBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[115]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d15['Matches Played'], d15['Economy1'],c='blue')
plt.scatter(d15['Wickets'],d15['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[116]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d15['Matches Played'],d15['Ct_St'],c='red')
plt.scatter(d15['Matches Played'],d15['Run Outs'],c='black')
plt.scatter(d15['Matches Played'],d15['Four'],c='blue')
plt.scatter(d15['Matches Played'],d15['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[117]:
val=['2015','20','480','30','330','15' ,'40' ,'5' ,'12' ,'10' ,'110','12','330','10',
'410','7','22','8','16','0','3.1','1','17','125','180','7.8','11']
fo.write('8')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[118]:
d16=pd.read_csv("1902HackDATA/IPL2016CSV.csv")
#***Basic/General/Normal Information
d16.head()
d16.dtypes
d16.info()
d16.describe()
# In[119]:
d16.isnull().any()
# In[120]:
d16['Ct_St'].fillna(0, inplace=True)
# In[121]:
d16['Strike Rate1']=None
d16['Economy1']=None
# In[122]:
for i in range(d16.shape[0]):
try:
d16['Strike Rate1'][i]=float(d16['Strike Rate'][i])
except ValueError:
d16['Strike Rate1'][i]=0.0
try:
d16['Economy1'][i]=float(d16['Economy'][i])
except ValueError:
d16['Economy1'][i]=0.0
# In[123]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d16.isnull().any()
d16
# In[124]:
print([x for x in d16], '\n')
print(len(d16["Player's Name"].unique()))
# In[125]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d16['Run'],d16['Strike Rate1'],c='blue')
plt.scatter(d16['Run'],d16['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[126]:
lbl=['Balls','Strike Rate']
plt.scatter(d16['Highest Run Scored'],d16['Ball'],c='red')
plt.scatter(d16['Highest Run Scored'],d16['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/StrikeRate')
plt.title('Scatter Visulization',fontsize=14)
# In[127]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d16['Matches Played'],d16['Strike Rate1'],c='blue')
plt.scatter(d16['Matches Played'],d16['Run Outs'],c='black')
plt.scatter(d16['Matches Played'],d16['Four'],c='green')
plt.scatter(d16['Matches Played'],d16['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[128]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d16['Runs Conceded'],d16['Economy1'],c='green')
plt.scatter(d16['Balls Bowled'],d16['Economy1'],c='yellow')
plt.scatter(d16['Wickets'],d16['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunConcede/BallBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[129]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d16['Matches Played'], d16['Economy1'],c='blue')
plt.scatter(d16['Wickets'],d16['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[130]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d16['Matches Played'],d16['Ct_St'],c='red')
plt.scatter(d16['Matches Played'],d16['Run Outs'],c='black')
plt.scatter(d16['Matches Played'],d16['Four'],c='blue')
plt.scatter(d16['Matches Played'],d16['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[131]:
val=['2016','30','500','40','470','18' ,'60' ,'8' ,'24' ,'10' ,'113','12','360','10','410',
'3','20','8','20','0','2.5','1','17','100','160','7.5','10']
fo.write('9')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[132]:
d17=pd.read_csv("1902HackDATA/IPL2017CSV.csv")
#***Basic/General/Normal Information
d17.head()
d17.dtypes
d17.info()
d17.describe()
# In[133]:
d17.isnull().any()
# In[134]:
d17['Six'].fillna(0, inplace=True)
# In[135]:
d17['Strike Rate1']=None
d17['Economy1']=None
# In[136]:
for i in range(d17.shape[0]):
try:
d17['Strike Rate1'][i]=float(d17['Strike Rate'][i])
except ValueError:
d17['Strike Rate1'][i]=0.0
try:
d17['Economy1'][i]=float(d17['Economy'][i])
except ValueError:
d17['Economy1'][i]=0.0
# In[137]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d17.isnull().any()
d17
# In[138]:
print([x for x in d17], '\n')
print(len(d17["Player's Name"].unique()))
# In[139]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d17['Run'],d17['Strike Rate1'],c='blue')
plt.scatter(d17['Run'],d17['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[140]:
lbl=['Balls','Strike Rate']
plt.scatter(d17['Highest Run Scored'],d17['Ball'],c='red')
plt.scatter(d17['Highest Run Scored'],d17['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/StrikeRate')
plt.title('Scatter Visulization',fontsize=14)
# In[141]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d17['Matches Played'],d17['Strike Rate1'],c='blue')
plt.scatter(d17['Matches Played'],d17['Run Outs'],c='black')
plt.scatter(d17['Matches Played'],d17['Four'],c='green')
plt.scatter(d17['Matches Played'],d17['Six'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[142]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d17['Runs Conceded'],d17['Economy1'],c='green')
plt.scatter(d17['Balls Bowled'],d17['Economy1'],c='yellow')
plt.scatter(d17['Wickets'],d17['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunConceded/BallBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[143]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d17['Matches Played'], d17['Economy1'],c='blue')
plt.scatter(d17['Wickets'],d17['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[144]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d17['Matches Played'],d17['Ct_St'],c='red')
plt.scatter(d17['Matches Played'],d17['Run Outs'],c='black')
plt.scatter(d17['Matches Played'],d17['Four'],c='blue')
plt.scatter(d17['Matches Played'],d17['Six'],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[145]:
val=['2017','10','500','10','400','25' ,'40' ,'10' ,'26' ,'10' ,'105','18','340','20','430',
'3','18','10','17','0','3.5','1','17','130','160','7.5','12']
fo.write('10')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
# In[146]:
d18=pd.read_csv("1902HackDATA/IPL2018CSV.csv")
#***Basic/General/Normal Information
d18.head()
d18.dtypes
d18.info()
d18.describe()
# In[147]:
#d18.isnull().any().count()
pd.isnull(d18).sum()
# In[148]:
d18['Economy'].fillna(0, inplace=True)
# In[149]:
d18['Strike Rate1']=None
d18['Economy1']=None
# In[150]:
for i in range(d18.shape[0]):
try:
d18['Strike Rate1'][i]=float(d18['Strike Rate'][i])
except ValueError:
d18['Strike Rate1'][i]=0.0
try:
d18['Economy1'][i]=float(d18['Economy'][i])
except ValueError:
d18['Economy1'][i]=0.0
# In[151]:
#pd.set_option('display.max_rows',500)
#pd.set_option('display.max_columns',100)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
#d18.isnull().any()
d18
# In[152]:
print([x for x in d18], '\n')
print(len(d18["Player's Name"].unique()))
# In[153]:
lbl=['Strike Rate', 'Balls']
plt.scatter(d18['Run'],d18['Strike Rate1'],c='blue')
plt.scatter(d18['Run'],d18['Ball'],c='red')
plt.legend(lbl)
plt.xlabel('Run')
plt.ylabel('StrikeRate/Ball')
plt.title('Scatter Visulization',fontsize=14)
# In[154]:
lbl=['Balls','Strike Rate']
plt.scatter(d18['Highest Run Scored'],d18['Ball'],c='red')
plt.scatter(d18['Highest Run Scored'],d18['Strike Rate1'],c='green')
plt.legend(lbl)
plt.xlabel('HighestRunScored')
plt.ylabel('Ball/StrikeRate')
plt.title('Scatter Visulization',fontsize=14)
# In[155]:
lbl=['Strike Rate', 'Run Outs', 'Four', 'Six']
plt.scatter(d18['Matches Played'],d18['Strike Rate1'],c='blue')
plt.scatter(d18['Matches Played'],d18['Run Outs'],c='black')
plt.scatter(d18['Matches Played'],d18["4's"],c='green')
plt.scatter(d18['Matches Played'],d18["6's"],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('StrikeRate/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[156]:
lbl=['Runs Conceded','Balls Bowled','Wickets']
plt.scatter(d18['Runs Scored'],d18['Economy1'],c='green')
plt.scatter(d18['Balls Bowled'],d18['Economy1'],c='yellow')
plt.scatter(d18['Wickets'],d18['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('RunsConcede/BallsBowled/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[157]:
lbl=['Matches Played', 'Wickets']
plt.scatter(d18['Matches Played'], d18['Economy1'],c='blue')
plt.scatter(d18['Wickets'],d18['Economy1'],c='red')
plt.legend(lbl)
plt.xlabel('MatchesPlayed/Wickets')
plt.ylabel('Economy')
plt.title('Scatter Visulization',fontsize=14)
# In[158]:
lbl=['Ct_St', 'Run Outs', 'Four', 'Six']
plt.scatter(d18['Matches Played'],d18['Ct_St'],c='red')
plt.scatter(d18['Matches Played'],d18['Run Outs'],c='black')
plt.scatter(d18['Matches Played'],d18["4's"],c='blue')
plt.scatter(d18['Matches Played'],d18["6's"],c='green')
plt.legend(lbl)
plt.xlabel('MatchesPlayed')
plt.ylabel('Ct_St/RunOuts/Four/Six')
plt.title('Scatter Visulization',fontsize=14)
# In[159]:
val=['2018','20','600','30','420','20' ,'60' ,'8' ,'27' ,'12' ,'115','12','400','20','480',
'2','19','9','18.5','0','2.5','1','17','140','180','7.4','11.9']
fo.write('11')
for va in val:
fo.write(',')
fo.write(va)
fo.write('\n')
fo.close()
# In[160]:
dptr=pd.read_csv("1902HackDATA/PdtrCSV.csv")
#***Basic/General/Normal Information
dptr.head()
dptr.dtypes
dptr.info()
dptr.describe()
# In[161]:
#pd.set_option('display.max_rows',500)
pd.set_option('display.max_columns',100)
#pd.reset_option('display.max_rows')
#pd.reset_option('display.max_columns')
#ddptr.isnull().any()
dptr
# In[162]:
dptr.shape
# In[163]:
lbl=[pdtr[x] for x in range(1,7)]
plt.plot(dptr['Year'], dptr[lbl[0]],c='purple')
plt.plot(dptr['Year'], dptr[lbl[1]],c='red')
plt.plot(dptr['Year'], dptr[lbl[2]],c='blue')
plt.plot(dptr['Year'], dptr[lbl[3]],c='green')
plt.plot(dptr['Year'], dptr[lbl[4]],c='brown')
plt.plot(dptr['Year'], dptr[lbl[5]],c='black')
plt.xlabel('Year')
plt.ylabel('Statistic')
plt.legend(lbl)
plt.title('Line Plot Visulization',fontsize=14)
# In[164]:
lbl=[pdtr[x] for x in range(7,15)]
plt.plot(dptr['Year'], dptr[lbl[0]],c='purple')
plt.plot(dptr['Year'], dptr[lbl[1]],c='red')
plt.plot(dptr['Year'], dptr[lbl[2]],c='blue')
plt.plot(dptr['Year'], dptr[lbl[3]],c='green')
plt.plot(dptr['Year'], dptr[lbl[4]],c='yellow')
plt.plot(dptr['Year'], dptr[lbl[5]],c='black')
plt.plot(dptr['Year'], dptr[lbl[6]],c='cyan')
plt.plot(dptr['Year'], dptr[lbl[7]],c='magenta')
plt.xlabel('Year')
plt.ylabel('Statistic')
plt.legend(lbl)
plt.title('Line Plot Visulization',fontsize=14)
# In[165]:
lbl=[pdtr[x] for x in range(15,21)]
plt.plot(dptr['Year'], dptr[lbl[0]],c='purple')
plt.plot(dptr['Year'], dptr[lbl[1]],c='red')
plt.plot(dptr['Year'], dptr[lbl[2]],c='blue')
plt.plot(dptr['Year'], dptr[lbl[3]],c='green')
plt.plot(dptr['Year'], dptr[lbl[4]],c='yellow')
plt.plot(dptr['Year'], dptr[lbl[5]],c='black')
plt.xlabel('Year')
plt.ylabel('Statistic')
plt.legend(lbl)
plt.title('Line Plot Visulization',fontsize=14)
# In[166]:
lbl=[pdtr[x] for x in range(21,27)]
plt.plot(dptr['Year'], dptr[lbl[0]],c='olive')
plt.plot(dptr['Year'], dptr[lbl[1]],c='red')
plt.plot(dptr['Year'], dptr[lbl[2]],c='blue')
plt.plot(dptr['Year'], dptr[lbl[3]],c='green')
plt.plot(dptr['Year'], dptr[lbl[4]],c='grey')
plt.plot(dptr['Year'], dptr[lbl[5]],c='black')
plt.xlabel('Year')
plt.ylabel('Statistic')
plt.legend(lbl)
plt.title('Line Plot Visulization',fontsize=14)
# In[167]:
ev_al_m=['Year', 'Run', 'Ball', "4's", "6's", 'Highest Run Scored', 'Balls Bowled', 'Runs Conceded', 'Wickets',
'Ct_St', 'Run Outs', 'Matches Played', 'Strike Rate1', 'Economy1']
evbt=['Year', 'Run', 'Ball', "4's", "6's", 'Highest Run Scored', 'Wickets', 'Ct_St','Run Outs', 'Matches Played', 'Strike Rate1']
evbl=['Year', 'Highest Run Scored', 'Balls Bowled', 'Runs Conceded', 'Wickets', 'Ct_St', 'Run Outs', 'Matches Played', 'Economy1']
# In[168]:
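# Project the 2019 ranges as a weighted average over the 11 recorded seasons:
# 2008-2013 contribute with weight 1 each and 2014-2018 with weights 1..5, so
# the divisor is 6 + (1+2+3+4+5) = 21 and recent seasons dominate the estimate.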
val19=[]
for x in dptr:
sm=0
for i in range(6):
sm+=float(dptr[x][i])
for i in range(6,11):
sm+=(i-5)*float(dptr[x][i])
val19.append(round(sm/21,5))
val19[0]=12
val19[1]=2019
#print(val19)
val19m=[]
for x in range(0, len(val19), 2):
ttt=(float(val19[x])+float(val19[x+1]))/2
val19m.append(round(ttt,5))
val19m[0]=2019
#print(val19m)
# In[169]:
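# Split the 2019 projections into three criteria vectors: val19bt (batting
# oriented), val19bl (bowling oriented) and val19al (all-rounders), blending
# each Mi/Mx pair with fixed coefficients; the coefficients appear to be
# empirical choices rather than derived values.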
val19bt=[2019]
val19bl=[2019]
val19al=[2019]
for x in range(2,12,2):
ttt=(float(val19[x])+2.82*float(val19[x+1]))/3
val19bt.append(round(ttt,5))
for x in range(2,10,2):
ttt=(float(val19[x])+2.82*float(val19[x+1]))/3
val19al.append(round(ttt,5))
for x in range(10, 15, 2):
ttt=(7*float(val19[x])+float(val19[x+1]))/7
val19bl.append(round(ttt,5))
val19al.append(round(ttt,5))
ttt=(float(val19[16])+0.45*float(val19[17]))/2
val19bt.append(round(ttt,5))
ttt=(float(val19[16])+1.4*float(val19[17]))/2
val19bl.append(round(ttt,5))
ttt=(float(val19[16])+1.05*float(val19[17]))/2
val19al.append(round(ttt,5))
for x in range(18, 22, 2):
ttt=(float(val19[x])+1.3*float(val19[x+1]))/2
val19bt.append(round(ttt,5))
val19bl.append(round(ttt,5))
val19al.append(round(ttt,5))
ttt=(float(val19[22])+1.72*float(val19[23]))/2
val19bt.append(round(ttt,5))
val19bl.append(round(ttt,5))
val19al.append(round(ttt,5))
ttt=(float(val19[24])+7*float(val19[25]))/7
val19bt.append(round(ttt,5))
val19al.append(round(ttt,5))
ttt=(5*float(val19[26])+float(val19[27]))/5
val19bl.append(round(ttt,5))
val19al.append(round(ttt,5))
#print(val19bt)
#print(val19bl)
#print(val19al)
# In[170]:
print(len(val19al),len(val19m),len(ev_al_m))
#print(len(val19al),val19[18],val19[19],val19[20],val19[21],val19[22])
#d14['Run'][1]
# In[171]:
#ev_al_m --> val19m'Pram1' and val19al'Pram2'
#evbt --> val19bt'Pram3'
#evbl --> val19bl'Pram4'
#Because all data set does not have same column name
ev_al_m14=['Year', 'Run', 'Ball', "Four", "Six", 'Highest Run Scored', 'Balls Bowled', 'Runs Scored', 'Wickets',
           'Ct_St', 'Run Outs', 'Matches Played', 'Strike Rate1', 'Economy1']
XXXXXX X XXX XXXXXXXX XX XXX XXXXXXX
XXX XXXXXXXXXXXX XXXXXXX XXXX XXX XXXXXXXXX XXXXXXXX XX XXXX XXXX
XXXXXXXXX XXX XXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXX XXXXXXX XX XXXXXX XXXXX
XXXXXX XXX XXX XXXX XXX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXX XX
XXXXXXX XXX XXXXXXXXXX XXXXXXXXX XXXX XX XXX XXXX XXXXXXXXX XXX XXX
XXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXX XXXXXXX XX XXXXX
XXX XXXXX XXXXXXXX XXXXXXXXX
XX XXXXX XX XXXXXX XXXXXXXXX XXXXXXXX XXXX XXXXXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXX XX XXX XXX XXXXX XXXXX
XXXX XXXXXXX XX XXXXX XXX XXXXXXXX XXXXXXX XXXXXXXX
XX XXXXXXXXXXXX XXXXXXX XXXX XXXX XXXX XXXXXXXXXX XXXXXXXX XXXX XXXXXXX XXXXXX
XX XXXX XXXX XXXXXXXX XX XXXXXX XXX XXXXXXXX XXXX XXX XXXXXXX XXX XXXXXXXXX
XXXX XXXX XXXXX XXXXXXXX XXX XXXX XXXXXXXX XX XXX XXX XXXX XXXXXXXXXXX
XXXXXXXXXX XXXXXXXX XXX XXXXXXXX XX XXX XXXX X XXXXXXXX XXXXX XX
XXXXXXXXXXXX XXXX XXX X XXXX XXXXXXXX XXXXXXX XXXXXXXXXX XXXX XXXXX XXXXXXX
XXXX XXXXXX XXX XX X XXX XXXX XX XXXX XXXX XXXXXXXXXXX XXXXXXXX XXXXXXX XX
XXXX XXXXX XXXXXX X XXXXXXX XXXX XXXX XX XXXX XX XXXXXXX XX XXXXX
XXXX XXXX XXXXXXX XX XXXXXXXXXX XXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX
XXX XXXXXXXXXXXXXX XXXXXXXX XXXXXXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXX
XXXXX XX XXXXX XXXXX XXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XX XXXXXXXXX XXXX XXX XXXXXXXXXXXX XXXX XXXXXX XXXX X XXXXX XXXXXXXXX
XXX XXXXXX XXXX XXXXXX XXXXXXXX XXXXX XX XXX XXXXXXXXX XXXX
X XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXX XXX XXXXXX
X XXXXX XXX XXXXX XXXXXXXXXXXX XXXXX XXXXX XXXXX XXXXX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXX XXXX XX XXXXXXXXXX
XXXXXXXX XXXXXXXXXX XXX XXXXXXXX XXXXX XXXXX
X XXXX XXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXX XXXXXXXXX XXXX XXX
XXXXX XXX XXXXXXXX XXXXXXX XXXXXXXXX XX XX XXX XXXXXXXX XXXXX XXX XXXX
XX XXXXXX
XXX XXX XXXXXXXX XX XXXXX XXX XXXXXX XXXXXXXXXX XXXXXXXXXXXX XX XXXX XXXX
XXXXXXXXX XXXX XX XXX XXXX XXXX XXXXXXXX XXXX XXXXXX XX XXXXX XXXX XX XXX
| |
where:
callCount = the number of calls made
timeTakenCPU = the CPU time spent in microseconds
timeTakenWall = the actual time ( wall time ) spent in microseconds
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def pixelAspect(self):
"""
        self.pixelAspect() -> float.
        Pixel Aspect ratio of the node.
        @return: float.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def proxy(self):
"""
self.proxy() -> bool
@return: True if proxy is enabled, False otherwise.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def readKnobs(self, s):
"""
self.readKnobs(s) -> None.
Read the knobs from a string (TCL syntax).
@param s: A string.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def redraw(self):
"""
self.redraw() -> None.
Force a redraw of the node.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def removeKnob(self, k):
"""
self.removeKnob(k) -> None.
Remove knob k from this node or panel. Throws a ValueError exception if k is not found on the node.
@param k: Knob.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def resetKnobsToDefault(self):
"""
self.resetKnobsToDefault() -> None
Reset all the knobs to their default values.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def running(self):
"""
        self.running() -> Node rendering when parallel threads are running or None.
        Class method.
        @return: Node rendering when parallel threads are running or None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def sample(self, c, x, y, dx, dy, frame):
"""
        self.sample(c, x, y, dx, dy, frame) -> Floating point value.
        Return pixel values from an image.
        This requires the image to be calculated, so performance may be very bad if this is placed into an expression in
        a control panel. Produces a cubic filtered result. Any sizes less than 1, including 0, produce the same filtered result;
        this is correct based on sampling theory. Note that integers are at the corners of pixels; to center on a pixel, add .5 to both coordinates.
        If the optional dx, dy are not given, the exact value of the square pixel that x, y lands in is returned. This is also called 'impulse filtering'.
@param c: Channel name.
@param x: Centre of the area to sample (X coordinate).
@param y: Centre of the area to sample (Y coordinate).
@param dx: Optional size of the area to sample (X coordinate).
@param dy: Optional size of the area to sample (Y coordinate).
@param frame: Optional frame to sample the node at.
@return: Floating point value.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
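    # Added usage sketch (not part of the original stub; the node and channel
    # names below are assumptions):
    #
    #   node = nuke.toNode('Read1')
    #   # Integers sit at pixel corners, so add .5 to centre on pixel (100, 200).
    #   red = node.sample('rgba.red', 100.5, 200.5)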
def screenHeight(self):
"""
self.screenHeight() -> int.
Height of the node when displayed on screen in the DAG, at 1:1 zoom, in pixels.
@return: int.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def screenWidth(self):
"""
self.screenWidth() -> int.
Width of the node when displayed on screen in the DAG, at 1:1 zoom, in pixels.
@return: int.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def selectOnly(self):
"""
self.selectOnly() -> None.
Set this node to be the only selection, as if it had been clicked in the DAG.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setInput(self, i, node):
"""
self.setInput(i, node) -> bool
Connect input i to node if canSetInput() returns true.
@param i: Input number.
@param node: The node to connect to input i.
@return: True if canSetInput() returns true, or if the input is already correct.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
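    # Added usage sketch (illustrative only; the node names are assumptions):
    #
    #   blur = nuke.toNode('Blur1')
    #   read = nuke.toNode('Read1')
    #   if blur.canSetInput(0, read):
    #       blur.setInput(0, read)  # connect Read1 to the Blur's input 0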
def setName(self, name, uncollide, updateExpressions):
"""
self.setName(name, uncollide=True, updateExpressions=False) -> None
Set name of the node and resolve name collisions if optional named argument 'uncollide' is True.
@param name: A string.
@param uncollide: Optional boolean to resolve name collisions. Defaults to True.
@param updateExpressions: Optional boolean to update expressions in other nodes to point at the new name. Defaults to False.
@return: None
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setSelected(self, selected):
"""
self.setSelected(selected) -> None.
Set the selection state of the node. This is the same as changing the 'selected' knob.
@param selected: New selection state - True or False.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setTab(self, tabIndex):
"""
self.setTab(tabIndex) -> None
@param tabIndex: The tab to show (first is 0).
@return: None
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setXYpos(self, x, y):
"""
self.setXYpos(x, y) -> None.
Set the (x, y) position of node in node graph.
@param x: The x position of node in node graph.
@param y: The y position of node in node graph.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setXpos(self, x):
"""
self.setXpos(x) -> None.
Set the x position of node in node graph.
@param x: The x position of node in node graph.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def setYpos(self, y):
"""
self.setYpos(y) -> None.
Set the y position of node in node graph.
@param y: The y position of node in node graph.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def showControlPanel(self, forceFloat):
"""
self.showControlPanel(forceFloat = false) -> None
@param forceFloat: Optional python object. If it evaluates to True the control panel will always open as a floating panel. Default is False.
@return: None
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def showInfo(self, s):
"""
self.showInfo(s) -> None.
Creates a dialog box showing the result of script s.
@param s: A string.
@return: None.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def shown(self):
"""
self.shown() -> true if the properties panel is open. This can be used to skip updates that are not visible to the user.
@return: true if the properties panel is open. This can be used to skip updates that are not visible to the user.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def treeHasError(self):
"""
treeHasError() -> bool
        True if the node or any node in its input tree has an error, False otherwise.
Error state of the node and its input tree.
Note that this will always return false for viewers, which cannot generate their input trees. Instead, choose an input of the viewer (e.g. the active one), and call treeHasError() on that.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
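    # Added usage sketch (illustrative; the node name is an assumption). As the
    # note above says, call this on an input of a viewer rather than on the
    # viewer itself:
    #
    #   node = nuke.toNode('Write1')
    #   if node.treeHasError():
    #       print('an upstream node has an error')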
def upstreamFrameRange(self, i):
"""
self.upstreamFrameRange(i) -> FrameRange
Frame range for the i'th input of this node.
@param i: Input number.
@return: FrameRange. Returns None when querying an invalid input.
"""
raise NotImplementedError("This function is not written yet. Please put in an issue on the github page.")
def width(self):
"""
self.width() -> int.
        Width
"""
Each stat kind is populated from one or more stat sections (which are described
in the containers module).
Ns_Kind_CompositeIndex -> composite-indexes
Kind_CompositeIndex -> composite-indexes
Ns_Kind_IsRootEntity -> entities + builtin-indexes
Ns_Kind_NotRootEntity -> entities + builtin-indexes
Kind_IsRootEntity -> entities + builtin-indexes
Kind_NotRootEntity -> entities + builtin-indexes
Ns_PropertyType_PropertyName_Kind -> entity-properties + index-properties
Ns_PropertyName_Kind -> entity-properties + index-properties
Ns_PropertyType_Kind -> entity-properties + index-properties
PropertyType_PropertyName_Kind -> entity-properties + index-properties
Ns_PropertyType -> entity-properties + index-properties
PropertyName_Kind -> entity-properties + index-properties
PropertyType_Kind -> entity-properties + index-properties
PropertyType -> entity-properties + index-properties
Ns_Kind -> entities + builtin-indexes + composite-indexes
Kind -> entities + builtin-indexes + composite-indexes
Namespace -> entities + builtin-indexes + composite-indexes
Ns_Total -> entities + builtin-indexes + composite-indexes
Total -> entities + builtin-indexes + composite-indexes
"""
import datetime
import logging
import sys
import time
from collections import defaultdict
import six
from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER
from appscale.datastore.fdb.stats.containers import CountBytes, StatsPropTypes
sys.path.append(APPSCALE_PYTHON_APPSERVER)
from google.appengine.datastore import entity_pb
# The value the datastore uses to populate the meaning field for timestamps.
GD_WHEN = 7
logger = logging.getLogger(__name__)
def fill_entity(project_id, kind, properties, name=None, id_=None,
namespace=''):
entity = entity_pb.EntityProto()
key = entity.mutable_key()
key.set_app(project_id)
if namespace:
key.set_name_space(namespace)
path = key.mutable_path()
element = path.add_element()
element.set_type(kind)
if name is not None:
element.set_name(name)
else:
element.set_id(id_)
group = entity.mutable_entity_group()
group.add_element().CopyFrom(element)
for prop_name, value in six.iteritems(properties):
prop = entity.add_property()
prop.set_name(prop_name)
prop.set_multiple(False)
value_pb = prop.mutable_value()
if isinstance(value, datetime.datetime):
value_pb.set_int64value(
int(time.mktime(value.timetuple()) * 1000000 + value.microsecond))
prop.set_meaning(GD_WHEN)
elif isinstance(value, int):
value_pb.set_int64value(value)
else:
value_pb.set_stringvalue(value.encode('utf-8'))
return entity
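# Added usage sketch (not from the original module; the project id, kind and
# property values below are assumptions): building a single stat entity with
# fill_entity.
#
#   example = fill_entity(
#       'guestbook', u'__Stat_Total__',
#       {'count': 10, 'bytes': 2048, 'timestamp': datetime.datetime.utcnow()},
#       name=u'total_entity_usage')
#   assert example.key().app() == 'guestbook'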
def fill_entities(project_id, project_stats, timestamp):
entities = []
composite_stats = project_stats.composite_stats.stats
stats_kind = u'__Stat_Ns_Kind_CompositeIndex__'
for namespace, by_index in six.iteritems(composite_stats):
for (index_id, kind), fields in six.iteritems(by_index):
name = u'_'.join([kind, six.text_type(index_id)])
props = {'index_id': index_id, 'kind_name': kind, 'timestamp': timestamp,
'count': fields.count, 'bytes': fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name,
namespace=namespace))
stats_kind = u'__Stat_Kind_CompositeIndex__'
composite_stats_by_index = defaultdict(CountBytes)
for namespace, by_index in six.iteritems(composite_stats):
for key, fields in six.iteritems(by_index):
composite_stats_by_index[key] += fields
for (index_id, kind), fields in six.iteritems(composite_stats_by_index):
name = u'_'.join([kind, six.text_type(index_id)])
props = {'index_id': index_id, 'kind_name': kind, 'timestamp': timestamp,
'count': fields.count, 'bytes': fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name))
entity_stats = project_stats.entity_stats
stats_kind = u'__Stat_Ns_Kind_IsRootEntity__'
for namespace, by_kind in six.iteritems(entity_stats.entities_root):
for kind, entity_fields in six.iteritems(by_kind):
builtin_fields = entity_stats.builtin_indexes_root[namespace][kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind,
namespace=namespace))
stats_kind = u'__Stat_Ns_Kind_NotRootEntity__'
for namespace, by_kind in six.iteritems(entity_stats.entities_notroot):
for kind, entity_fields in six.iteritems(by_kind):
builtin_fields = entity_stats.builtin_indexes_notroot[namespace][kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind,
namespace=namespace))
stats_kind = u'__Stat_Ns_Kind__'
entity_stats_by_ns_kind = defaultdict(lambda: defaultdict(CountBytes))
for namespace, by_kind in six.iteritems(entity_stats.entities_root):
for kind, fields in six.iteritems(by_kind):
entity_stats_by_ns_kind[namespace][kind] += fields
for namespace, by_kind in six.iteritems(entity_stats.entities_notroot):
for kind, fields in six.iteritems(by_kind):
entity_stats_by_ns_kind[namespace][kind] += fields
builtin_stats_by_ns_kind = defaultdict(lambda: defaultdict(CountBytes))
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_root):
for kind, fields in six.iteritems(by_kind):
builtin_stats_by_ns_kind[namespace][kind] += fields
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_notroot):
for kind, fields in six.iteritems(by_kind):
builtin_stats_by_ns_kind[namespace][kind] += fields
composite_stats_by_ns_kind = defaultdict(lambda: defaultdict(CountBytes))
for namespace, by_index in six.iteritems(composite_stats):
for (index_id, kind), fields in six.iteritems(by_index):
composite_stats_by_ns_kind[namespace][kind] += fields
for namespace, by_kind in six.iteritems(entity_stats_by_ns_kind):
for kind, entity_fields in six.iteritems(by_kind):
builtin_fields = builtin_stats_by_ns_kind[namespace][kind]
composite_fields = composite_stats_by_ns_kind[namespace][kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'composite_index_count': composite_fields.count,
'composite_index_bytes': composite_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes +
composite_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind,
namespace=namespace))
stats_kind = u'__Stat_Kind_IsRootEntity__'
root_entity_stats_by_kind = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.entities_root):
for kind, fields in six.iteritems(by_kind):
root_entity_stats_by_kind[kind] += fields
root_builtin_stats_by_kind = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_root):
for kind, fields in six.iteritems(by_kind):
root_builtin_stats_by_kind[kind] += fields
for kind, entity_fields in six.iteritems(root_entity_stats_by_kind):
builtin_fields = root_builtin_stats_by_kind[kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind))
stats_kind = u'__Stat_Kind_NotRootEntity__'
notroot_entity_stats_by_kind = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.entities_notroot):
for kind, fields in six.iteritems(by_kind):
notroot_entity_stats_by_kind[kind] += fields
notroot_builtin_stats_by_kind = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_notroot):
for kind, fields in six.iteritems(by_kind):
notroot_builtin_stats_by_kind[kind] += fields
for kind, entity_fields in six.iteritems(notroot_entity_stats_by_kind):
builtin_fields = notroot_builtin_stats_by_kind[kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind))
stats_kind = u'__Stat_Kind__'
entity_stats_by_kind = defaultdict(CountBytes)
for kind, fields in six.iteritems(root_entity_stats_by_kind):
entity_stats_by_kind[kind] += fields
for kind, fields in six.iteritems(notroot_entity_stats_by_kind):
entity_stats_by_kind[kind] += fields
builtin_stats_by_kind = defaultdict(CountBytes)
for kind, fields in six.iteritems(root_builtin_stats_by_kind):
builtin_stats_by_kind[kind] += fields
for kind, fields in six.iteritems(notroot_builtin_stats_by_kind):
builtin_stats_by_kind[kind] += fields
composite_stats_by_kind = defaultdict(CountBytes)
for (index_id, kind), fields in six.iteritems(composite_stats_by_index):
composite_stats_by_kind[kind] += fields
for kind, entity_fields in six.iteritems(entity_stats_by_kind):
builtin_fields = builtin_stats_by_kind[kind]
composite_fields = composite_stats_by_kind[kind]
props = {'kind_name': kind, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'composite_index_count': composite_fields.count,
'composite_index_bytes': composite_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes +
composite_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, kind))
stats_kind = u'__Stat_Namespace__'
composite_stats_by_ns = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(composite_stats):
composite_stats_by_ns[namespace] += sum(six.itervalues(by_kind),
CountBytes())
entity_stats_by_ns = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.entities_root):
entity_stats_by_ns[namespace] += sum(six.itervalues(by_kind), CountBytes())
for namespace, by_kind in six.iteritems(entity_stats.entities_notroot):
entity_stats_by_ns[namespace] += sum(six.itervalues(by_kind), CountBytes())
builtin_stats_by_ns = defaultdict(CountBytes)
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_root):
builtin_stats_by_ns[namespace] += sum(six.itervalues(by_kind), CountBytes())
for namespace, by_kind in six.iteritems(entity_stats.builtin_indexes_notroot):
builtin_stats_by_ns[namespace] += sum(six.itervalues(by_kind), CountBytes())
for namespace, entity_fields in six.iteritems(entity_stats_by_ns):
builtin_fields = builtin_stats_by_ns[namespace]
composite_fields = composite_stats_by_ns[namespace]
props = {'subject_namespace': namespace, 'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'composite_index_count': composite_fields.count,
'composite_index_bytes': composite_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes +
composite_fields.bytes}
if namespace:
entities.append(fill_entity(project_id, stats_kind, props, namespace))
else:
entities.append(fill_entity(project_id, stats_kind, props, id_=1))
stats_kind = u'__Stat_Ns_Total__'
name = u'total_entity_usage'
for namespace, entity_fields in six.iteritems(entity_stats_by_ns):
builtin_fields = builtin_stats_by_ns[namespace]
composite_fields = composite_stats_by_ns[namespace]
props = {'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'composite_index_count': composite_fields.count,
'composite_index_bytes': composite_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes +
composite_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name,
namespace=namespace))
stats_kind = u'__Stat_Total__'
name = u'total_entity_usage'
entity_fields = sum(six.itervalues(entity_stats_by_ns), CountBytes())
builtin_fields = sum(six.itervalues(builtin_stats_by_ns), CountBytes())
composite_fields = sum(six.itervalues(composite_stats_by_ns), CountBytes())
props = {'timestamp': timestamp,
'builtin_index_count': builtin_fields.count,
'builtin_index_bytes': builtin_fields.bytes,
'count': entity_fields.count, 'entity_bytes': entity_fields.bytes,
'composite_index_count': composite_fields.count,
'composite_index_bytes': composite_fields.bytes,
'bytes': entity_fields.bytes + builtin_fields.bytes +
composite_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name))
prop_stats = project_stats.property_stats
stats_kind = u'__Stat_Ns_PropertyType_PropertyName_Kind__'
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
for kind, by_type in six.iteritems(by_kind):
for prop_type, by_name in six.iteritems(by_type):
type_name = StatsPropTypes.NAMES[prop_type]
for prop_name, entity_fields in six.iteritems(by_name):
name = u'_'.join([type_name, prop_name, kind])
index_fields = prop_stats.index_stats[namespace][kind][prop_type]\
[prop_name]
props = {'kind_name': kind, 'timestamp': timestamp,
'property_type': type_name, 'property_name': prop_name,
'builtin_index_count': index_fields.count,
'builtin_index_bytes': index_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + index_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name,
namespace=namespace))
stats_kind = u'__Stat_Ns_PropertyType_Kind__'
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
for kind, by_type in six.iteritems(by_kind):
for prop_type, by_name in six.iteritems(by_type):
type_name = StatsPropTypes.NAMES[prop_type]
name = u'_'.join([type_name, kind])
entity_fields = sum(six.itervalues(by_name), CountBytes())
index_fields = sum(
six.itervalues(prop_stats.index_stats[namespace][kind][prop_type]),
CountBytes())
props = {'kind_name': kind, 'timestamp': timestamp,
'property_type': type_name,
'builtin_index_count': index_fields.count,
'builtin_index_bytes': index_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + index_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name,
namespace=namespace))
stats_kind = u'__Stat_Ns_PropertyName_Kind__'
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
for kind, by_type in six.iteritems(by_kind):
combined_entities = defaultdict(CountBytes)
combined_indexes = defaultdict(CountBytes)
for prop_type, by_name in six.iteritems(by_type):
for prop_name, fields in six.iteritems(by_name):
combined_entities[prop_name] += fields
combined_indexes[prop_name] += prop_stats.index_stats[namespace]\
[kind][prop_type][prop_name]
for prop_name, entity_fields in six.iteritems(combined_entities):
name = u'_'.join([prop_name, kind])
index_fields = combined_indexes[prop_name]
props = {'kind_name': kind, 'timestamp': timestamp,
'property_name': prop_name,
'builtin_index_count': index_fields.count,
'builtin_index_bytes': index_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + index_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name,
namespace=namespace))
stats_kind = u'__Stat_Ns_PropertyType__'
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
combined_entities = defaultdict(CountBytes)
combined_indexes = defaultdict(CountBytes)
for kind, by_type in six.iteritems(by_kind):
for prop_type, by_name in six.iteritems(by_type):
combined_entities[prop_type] += sum(
six.itervalues(by_name), CountBytes())
combined_indexes[prop_type] += sum(
six.itervalues(prop_stats.index_stats[namespace][kind][prop_type]),
CountBytes())
for prop_type, entity_fields in six.iteritems(combined_entities):
type_name = StatsPropTypes.NAMES[prop_type]
index_fields = combined_indexes[prop_type]
props = {'timestamp': timestamp, 'property_type': type_name,
'builtin_index_count': index_fields.count,
'builtin_index_bytes': index_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + index_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, type_name,
namespace=namespace))
stats_kind = u'__Stat_PropertyName_Kind__'
combined_entities = defaultdict(lambda: defaultdict(CountBytes))
combined_indexes = defaultdict(lambda: defaultdict(CountBytes))
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
for kind, by_type in six.iteritems(by_kind):
for prop_type, by_name in six.iteritems(by_type):
for prop_name, fields in six.iteritems(by_name):
combined_entities[prop_name][kind] += fields
combined_indexes[prop_name][kind] += prop_stats.index_stats\
[namespace][kind][prop_type][prop_name]
for prop_name, by_kind in six.iteritems(combined_entities):
for kind, entity_fields in six.iteritems(by_kind):
index_fields = combined_indexes[prop_name][kind]
name = u'_'.join([prop_name, kind])
props = {'timestamp': timestamp, 'kind_name': kind,
'property_name': prop_name,
'builtin_index_count': index_fields.count,
'builtin_index_bytes': index_fields.bytes,
'count': entity_fields.count,
'entity_bytes': entity_fields.bytes,
'bytes': entity_fields.bytes + index_fields.bytes}
entities.append(fill_entity(project_id, stats_kind, props, name))
stats_kind = u'__Stat_PropertyType_Kind__'
combined_entities = defaultdict(lambda: defaultdict(CountBytes))
combined_indexes = defaultdict(lambda: defaultdict(CountBytes))
for namespace, by_kind in six.iteritems(prop_stats.entity_stats):
for kind, by_type in six.iteritems(by_kind):
for prop_type, by_name in six.iteritems(by_type):
combined_entities[prop_type][kind] += | |
``px``, etc.
hx : list, optional
list of normalized field heights along x axis, of length ``numRays``;
if ``None``, a list of 0.0s for ``hx`` is created.
hy : list, optional
list of normalized field heights along y axis, of length ``numRays``;
if ``None``, a list of 0.0s for ``hy`` is created
px : list, optional
list of normalized heights in pupil coordinates, along x axis, of
length ``numRays``; if ``None``, a list of 0.0s for ``px`` is created.
py : list, optional
list of normalized heights in pupil coordinates, along y axis, of
length ``numRays``; if ``None``, a list of 0.0s for ``py`` is created
Exr : list, optional
list of real part of the electric field in x direction for each ray.
if ``None``, a list of 0.0s for ``Exr`` is created. See Notes
Exi : list, optional
list of imaginary part of the electric field in x direction for each ray.
if ``None``, a list of 0.0s for ``Exi`` is created. See Notes
Eyr : list, optional
list of real part of the electric field in y direction for each ray.
if ``None``, a list of 0.0s for ``Eyr`` is created. See Notes
Eyi : list, optional
list of imaginary part of the electric field in y direction for each ray.
if ``None``, a list of 0.0s for ``Eyi`` is created. See Notes
Ezr : list, optional
list of real part of the electric field in z direction for each ray.
if ``None``, a list of 0.0s for ``Ezr`` is created. See Notes
Ezi : list, optional
list of imaginary part of the electric field in z direction for each ray.
if ``None``, a list of 0.0s for ``Ezi`` is created. See Notes
Ex : float
normalized electric field magnitude in x direction to be defined in
array position 0. If not provided, an unpolarized ray will be traced.
See Notes.
Ey : float
normalized electric field magnitude in y direction to be defined in
array position 0. If not provided, an unpolarized ray will be traced.
See Notes.
Phax : float
relative phase in x direction in degrees
Phay : float
relative phase in y direction in degrees
intensity : float or list, optional
initial intensities. If a list of length ``numRays`` is given it is
used. If a single float value is passed, all rays use the same value for
their initial intensities. If ``None``, all rays use a value of ``1.0``
as their initial intensities.
waveNum : integer or list (of integers), optional
wavelength number. If a list of integers of length ``numRays`` is given
it is used. If a single integer value is passed, all rays use the same
value for wavelength number. If ``None``, all rays use wavelength
number equal to 1.
mode : integer, optional
0 = real (Default), 1 = paraxial
surf : integer, optional
surface to trace the ray to. (``surf = -1``, default)
timeout : integer, optional
command timeout specified in milli-seconds
Returns
-------
error : list of integers
0 = ray traced successfully;
+ve number = the ray missed the surface;
-ve number = the ray total internal reflected (TIR) at surface
given by the absolute value of the ``error``
intensity : list of reals
the relative transmitted intensity of the ray, including any pupil
or surface apodization defined.
Exr : list of real values
list of real parts of the electric field components in x
Exi : list of real values
list of imaginary parts of the electric field components in x
Eyr : list of real values
list of real parts of the electric field components in y
Eyi : list of real values
list of imaginary parts of the electric field components in y
Ezr : list of real values
list of real parts of the electric field components in z
Ezi : list of real values
list of imaginary parts of the electric field components in z
If ray tracing fails, a single integer error code is returned,
which has the following meaning: -1 = Couldn't retrieve data in
PostArrayTraceMessage, -999 = Couldn't communicate with Zemax,
-998 = timeout reached
Notes
-----
1. If all six of the electric field values ``Exr``, ``Exi``, ``Eyr``,
``Eyi``, ``Ezr``, and ``Ezi`` for a ray are zero Zemax will use the
``Ex`` and ``Ey`` values provided in array position 0 to determine
the electric field. Otherwise, the electric field is defined by these
six values.
2. The defined electric field vector must be orthogonal to the ray vector
or incorrect ray tracing will result.
3. Even if these six values are defined for each ray, values for ``Ex``
and ``Ey`` in the array position 0 must still be defined, otherwise
an unpolarized ray trace will result.
"""
rd = getRayDataArray(numRays, tType=2, mode=mode, endSurf=surf,
x=Ex, y=Ey, z=Phax, l=Phay)
hx = hx if hx else [0.0] * numRays
hy = hy if hy else [0.0] * numRays
px = px if px else [0.0] * numRays
py = py if py else [0.0] * numRays
Exr = Exr if Exr else [0.0] * numRays
Exi = Exi if Exi else [0.0] * numRays
Eyr = Eyr if Eyr else [0.0] * numRays
Eyi = Eyi if Eyi else [0.0] * numRays
Ezr = Ezr if Ezr else [0.0] * numRays
Ezi = Ezi if Ezi else [0.0] * numRays
if intensity:
intensity = intensity if isinstance(intensity, list) else [intensity]*numRays
else:
intensity = [1.0] * numRays
if waveNum:
waveNum = waveNum if isinstance(waveNum, list) else [waveNum]*numRays
else:
waveNum = [1] * numRays
# fill up the structure
for i in xrange(1, numRays+1):
rd[i].x = hx[i-1]
rd[i].y = hy[i-1]
rd[i].z = px[i-1]
rd[i].l = py[i-1]
rd[i].Exr = Exr[i-1]
rd[i].Exi = Exi[i-1]
rd[i].Eyr = Eyr[i-1]
rd[i].Eyi = Eyi[i-1]
rd[i].Ezr = Ezr[i-1]
rd[i].Ezi = Ezi[i-1]
rd[i].intensity = intensity[i-1]
rd[i].wave = waveNum[i-1]
# call ray tracing
ret = zArrayTrace(rd, timeout)
d = {}
if ret == 0:
reals = ['intensity', 'Exr', 'Exi', 'Eyr', 'Eyi', 'Ezr', 'Ezi']
ints = ['error', ]
for r in reals:
exec(r + " = [0.0] * numRays", locals(), d)
for i in ints:
exec(i + " = [0] * numRays", locals(), d)
for i in xrange(1, numRays+1):
d["intensity"][i-1] = rd[i].intensity
d["Exr"][i-1] = rd[i].Exr
d["Exi"][i-1] = rd[i].Exi
d["Eyr"][i-1] = rd[i].Eyr
d["Eyi"][i-1] = rd[i].Eyi
d["Ezr"][i-1] = rd[i].Ezr
d["Ezi"][i-1] = rd[i].Ezi
d["error"][i-1] = rd[i].error
return (d["error"], d["intensity"],
d["Exr"], d["Exi"], d["Eyr"], d["Eyi"], d["Ezr"], d["Ezi"])
else:
return ret
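# Added caller sketch (illustrative): the helper above returns either a single
# integer error code or an 8-tuple of per-ray lists. The name `trace_pol` is an
# assumption, since the original def line is truncated in this excerpt.
#
#   result = trace_pol(numRays=2, px=[0.0, 0.5], py=[0.0, 0.5], Ex=1.0, Ey=0.0)
#   if isinstance(result, int):
#       print('array trace failed with code', result)
#   else:
#       err, inten, Exr, Exi, Eyr, Eyi, Ezr, Ezi = result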
def zGetPolTraceDirectArray(numRays, x=None, y=None, z=None, l=None, m=None,
n=None, Exr=None, Exi=None, Eyr=None, Eyi=None,
Ezr=None, Ezi=None, Ex=0, Ey=0, Phax=0, Phay=0,
intensity=None, waveNum=None, mode=0, startSurf=0,
lastSurf=-1, timeout=5000):
"""Trace large number of polarized rays defined by the ``x``, ``y``, ``z``,
``l``, ``m`` and ``n`` coordinates on any starting surface as well as electric
field magnitude and relative phase. Similar to ``GetPolTraceDirect()``
Ray tracing is performed on the lens file in the LDE of main Zemax
application (not in the DDE server)
Parameters
----------
numRays : integer
number of rays to trace. ``numRays`` should be equal to the length
of the lists (if provided) ``hx``, ``hy``, ``px``, etc.
x : list, optional
list specifying the x coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``x`` is created.
y : list, optional
list specifying the y coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``y`` is created
z : list, optional
list specifying the z coordinates of the ray at the start surface,
of length ``numRays``; if ``None``, a list of 0.0s for ``z`` is created.
l : list, optional
list of x-direction cosines, of length ``numRays``; if ``None``, a
list of 0.0s for ``l`` is created
m : list, | |
<filename>tests/plugins/test_accounts.py
import bcrypt
import unittest
import uuid
from unittest import mock
from kinto.core import utils
from kinto.core.events import ACTIONS, ResourceChanged
from kinto.core.testing import get_user_headers, DummyRequest
from pyramid.exceptions import ConfigurationError
from pyramid_mailer import get_mailer
from kinto.plugins.accounts import scripts, ACCOUNT_CACHE_KEY
from kinto.plugins.accounts.utils import (
get_cached_reset_password,
get_cached_validation_key,
hash_password,
)
from kinto.plugins.accounts.views import on_account_created
from kinto.plugins.accounts.views.validation import on_account_activated
from .. import support
class AccountsWebTest(support.BaseWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
if extras is None:
extras = {}
extras.setdefault("multiauth.policies", "account")
extras.setdefault("includes", "kinto.plugins.accounts")
extras.setdefault("account_create_principals", "system.Everyone")
# XXX: this should be a default setting.
extras.setdefault(
"multiauth.policy.account.use",
"kinto.plugins.accounts.authentication." "AccountsAuthenticationPolicy",
)
extras.setdefault("account_cache_ttl_seconds", "30")
return super().get_app_settings(extras)
class AccountsValidationWebTest(AccountsWebTest):
def setUp(self):
self.mailer = get_mailer(self.app.app.registry)
self.mailer.outbox = [] # Reset the outbox before each test.
@classmethod
def get_app_settings(cls, extras=None):
if extras is None:
extras = {}
# Enable the account validation option.
extras.setdefault("account_validation", True)
# Use a testing mailer.
extras.setdefault("mail.mailer", "testing")
# Email templates for the user creation.
extras.setdefault(
"account_validation.email_subject_template", "{name}, activate your account {id}"
)
extras.setdefault(
"account_validation.email_body_template",
"{activation-form-url}/{id}/{activation-key} {bad-key}",
)
# Email templates for the user validated confirmation.
extras.setdefault(
"account_validation.email_confirmation_subject_template",
"{name}, your account {id} is now active",
)
extras.setdefault(
"account_validation.email_confirmation_body_template",
"Your account {id} has been successfully activated. Connect to {homepage}",
)
# Email templates for the reset password.
extras.setdefault(
"account_validation.email_reset_password_subject_template",
"{name}, here is a temporary reset password for {id}",
)
extras.setdefault(
"account_validation.email_reset_password_body_template",
"You can use this temporary reset password {reset-password} to change your account {id} password",
)
return super().get_app_settings(extras)
class BadAccountsConfigTest(support.BaseWebTest, unittest.TestCase):
def test_raise_configuration_if_accounts_not_mentioned(self):
with self.assertRaises(ConfigurationError) as cm:
self.make_app(
{"includes": "kinto.plugins.accounts", "multiauth.policies": "basicauth"}
)
assert "Account policy missing" in str(cm.exception)
class HelloViewTest(AccountsWebTest):
def test_accounts_capability_if_enabled(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
self.assertIn("accounts", capabilities)
class HelloActivationViewTest(AccountsValidationWebTest):
def test_account_validation_capability_if_enabled(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
self.assertIn("accounts", capabilities)
self.assertIn("validation_enabled", capabilities["accounts"])
self.assertTrue(capabilities["accounts"]["validation_enabled"])
class AccountCreationTest(AccountsWebTest):
def test_anyone_can_create_an_account(self):
self.app.post_json("/accounts", {"data": {"id": "alice", "password": "<PASSWORD>"}}, status=201)
def test_account_can_be_created_with_put(self):
self.app.put_json("/accounts/alice", {"data": {"password": "<PASSWORD>"}}, status=201)
def test_password_is_stored_encrypted(self):
self.app.put_json("/accounts/alice", {"data": {"password": "<PASSWORD>"}}, status=201)
stored = self.app.app.registry.storage.get(
parent_id="alice", resource_name="account", object_id="alice"
)
assert stored["password"] != "<PASSWORD>"
def test_authentication_is_accepted_if_account_exists(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
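    # Added note (illustrative): get_user_headers is assumed to build standard
    # HTTP Basic auth headers, roughly equivalent to:
    #
    #   import base64
    #   creds = base64.b64encode(b"me:bouh").decode("utf-8")
    #   headers = {"Authorization": "Basic " + creds}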
def test_password_field_is_mandatory(self):
self.app.post_json("/accounts", {"data": {"id": "me"}}, status=400)
def test_id_field_is_mandatory(self):
self.app.post_json("/accounts", {"data": {"password": "<PASSWORD>"}}, status=400)
def test_id_can_be_email(self):
self.app.put_json(
"/accounts/[email protected]", {"data": {"password": "<PASSWORD>"}}, status=201
)
def test_account_can_have_metadata(self):
resp = self.app.post_json(
"/accounts", {"data": {"id": "me", "password": "<PASSWORD>", "age": 42}}, status=201
)
assert resp.json["data"]["age"] == 42
def test_cannot_create_account_if_already_exists(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
resp = self.app.post_json(
"/accounts", {"data": {"id": "me", "password": "bouh"}}, status=403
)
assert "already exists" in resp.json["message"]
def test_username_and_account_id_must_match(self):
resp = self.app.put_json(
"/accounts/alice", {"data": {"id": "bob", "password": "<PASSWORD>"}}, status=400
)
assert "does not match" in resp.json["message"]
def test_returns_existing_account_if_authenticated(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
self.app.post_json(
"/accounts",
{"data": {"id": "me", "password": "<PASSWORD>"}},
headers=get_user_headers("me", "bouh"),
status=200,
)
def test_cannot_create_other_account_if_authenticated(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
resp = self.app.post_json(
"/accounts",
{"data": {"id": "you", "password": "<PASSWORD>"}},
headers=get_user_headers("me", "bouh"),
status=400,
)
assert "do not match" in resp.json["message"]
def test_authentication_does_not_call_bcrypt_twice(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
with mock.patch("kinto.plugins.accounts.authentication.bcrypt") as mocked_bcrypt:
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
assert mocked_bcrypt.checkpw.call_count == 1
def test_authentication_checks_bcrypt_again_if_password_changes(self):
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
with mock.patch("kinto.plugins.accounts.authentication.bcrypt") as mocked_bcrypt:
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
self.app.patch_json(
"/accounts/me",
{"data": {"password": "<PASSWORD>"}},
status=200,
headers=get_user_headers("me", "bouh"),
)
resp = self.app.get("/", headers=get_user_headers("me", "blah"))
assert resp.json["user"]["id"] == "account:me"
assert mocked_bcrypt.checkpw.call_count == 2
def test_authentication_refresh_the_cache_each_time_we_authenticate(self):
hmac_secret = self.app.app.registry.settings["userid_hmac_secret"]
cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_CACHE_KEY.format("me"))
self.app.post_json("/accounts", {"data": {"id": "me", "password": "<PASSWORD>"}}, status=201)
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
self.app.app.registry.cache.expire(cache_key, 10)
resp = self.app.get("/", headers=get_user_headers("me", "bouh"))
assert resp.json["user"]["id"] == "account:me"
assert self.app.app.registry.cache.ttl(cache_key) >= 20
resp = self.app.get("/", headers=get_user_headers("me", "blah"))
assert "user" not in resp.json
def test_validate_view_not_active(self):
# The `validate` view is only active when the `account_validation` option is enabled.
# Create the user.
self.app.post_json(
"/accounts", {"data": {"id": "<EMAIL>", "password": "<PASSWORD>"}}, status=201
)
# Validate the user.
self.app.post_json("/accounts/<EMAIL>/validate/some_validation_key", status=404)
def test_reset_password_view_not_active(self):
# The `validate` view is only active when the `account_validation` option is enabled.
# Create the user.
self.app.post_json(
"/accounts", {"data": {"id": "<EMAIL>", "password": "<PASSWORD>"}}, status=201
)
# Ask for a reset password.
self.app.post_json("/accounts/<EMAIL>/reset-password", status=404)
class AccountValidationCreationTest(AccountsValidationWebTest):
def test_create_account_fails_if_not_email(self):
resp = self.app.post_json(
"/accounts", {"data": {"id": "alice", "password": "<PASSWORD>"}}, status=400
)
assert "user id should match" in resp.json["message"]
def test_create_account_stores_activated_field(self):
uuid_string = "20e81ab7-51c0-444f-b204-f1c4cfe1aa7a"
with mock.patch("uuid.uuid4", return_value=uuid.UUID(uuid_string)):
resp = self.app.post_json(
"/accounts",
{
"data": {
"id": "<EMAIL>",
"password": "<PASSWORD>",
"email-context": {
"name": "Alice",
"activation-form-url": "https://example.com",
},
}
},
status=201,
)
assert "activation-key" not in resp.json["data"]
assert "validated" in resp.json["data"]
assert not resp.json["data"]["validated"]
assert len(self.mailer.outbox) == 1
mail = self.mailer.outbox[0] # Get the validation email.
assert mail.sender == "<EMAIL>"
assert mail.subject == "Alice, activate your account <EMAIL>"
assert mail.recipients == ["<EMAIL>"]
# The {{bad-key}} from the template will be rendered as {bad-key} in
# the final email, instead of failing the formatting.
assert mail.body == f"https://example.com/<EMAIL>/{uuid_string} {{bad-key}}"
# The activation key is stored in the cache.
assert get_cached_validation_key("<EMAIL>", self.app.app.registry) == uuid_string
def test_cant_authenticate_with_unactivated_account(self):
self.app.post_json(
"/accounts",
{"data": {"id": "<EMAIL>", "password": "<PASSWORD>", "activated": False}},
status=201,
)
resp = self.app.get("/", headers=get_user_headers("<EMAIL>", "<PASSWORD>"))
assert "user" not in resp.json
def test_validation_fail_bad_user(self):
# Validation should fail on a non existing user.
resp = self.app.post_json("/accounts/<EMAIL>/validate/123", {}, status=403)
assert "Account ID and activation key do not match" in resp.json["message"]
def test_validation_fail_bad_activation_key(self):
uuid_string = "20e81ab7-51c0-444f-b204-f1c4cfe1aa7a"
with mock.patch("uuid.uuid4", return_value=uuid.UUID(uuid_string)):
self.app.post_json(
"/accounts", {"data": {"id": "<EMAIL>", "password": "<PASSWORD>"}}, status=201
)
# Validate the user.
resp = self.app.post_json(
"/accounts/<EMAIL>/validate/bad-activation-key", {}, status=403
)
assert "Account ID and activation key do not match" in resp.json["message"]
# The activation key is still in the cache
assert get_cached_validation_key("<EMAIL>", self.app.app.registry) is not None
def test_validation_validates_user(self):
# On user activation the 'validated' field is set to True.
uuid_string = "20e81ab7-51c0-444f-b204-f1c4cfe1aa7a"
with mock.patch("uuid.uuid4", return_value=uuid.UUID(uuid_string)):
self.app.post_json(
"/accounts",
{
"data": {
"id": "<EMAIL>",
"password": "<PASSWORD>",
"email-context": {"name": "Alice", "homepage": "https://example.com"},
}
},
status=201,
)
resp = self.app.post_json(
"/accounts/<EMAIL>/validate/" + uuid_string, {}, status=200
)
assert "validated" in resp.json
assert resp.json["validated"]
# An active user can authenticate.
resp = self.app.get("/", headers=get_user_headers("<EMAIL>", "12éé6"))
assert resp.json["user"]["id"] == "account:<EMAIL>"
# Once activated, the activation key is removed from the cache.
assert get_cached_validation_key("<EMAIL>", self.app.app.registry) is None
        assert len(self.mailer.outbox) == 2  # Validation email, confirmation email.
mail = self.mailer.outbox[1] # Get the confirmation email.
assert mail.sender == "<EMAIL>"
assert mail.subject == "Alice, your account <EMAIL> is now active"
assert mail.recipients == ["<EMAIL>"]
assert (
mail.body
== "Your account <EMAIL> has been successfully activated. Connect to https://example.com"
)
def test_previously_created_accounts_can_still_authenticate(self):
"""Accounts created before activating the 'account validation' option can still authenticate."""
# Create an account without going through the accounts API.
        hashed_password = hash_password("<PASSWORD>")
self.app.app.registry.storage.create(
parent_id="alice",
resource_name="account",
record={"id": "alice", "password": <PASSWORD>},
)
resp = self.app.get("/", headers=get_user_headers("alice", "<PASSWORD>"))
assert resp.json["user"]["id"] == "account:alice"
def test_reset_password_bad_user(self):
resp = self.app.post_json("/accounts/<EMAIL>/reset-password", {}, status=200)
# Don't give information on the existence of a user id: return a generic message.
assert resp.json["message"] == "A temporary reset password has been sent by mail"
# Make sure no email was sent.
assert len(self.mailer.outbox) == 0
def test_reset_password_bad_email(self):
# Create an account without going through the accounts API.
        hashed_password = hash_password("<PASSWORD>")
self.app.app.registry.storage.create(
parent_id="alice",
resource_name="account",
record={"id": "alice", "password": <PASSWORD>},
)
resp = self.app.post_json("/accounts/alice/reset-password", {}, status=400)
assert "user id should match" in resp.json["message"]
def test_reset_password_sends_email(self):
reset_password = "<PASSWORD>"
with mock.patch("uuid.uuid4", return_value=uuid.UUID(reset_password)):
# Create the user.
self.app.post_json(
"/accounts", {"data": {"id": "<EMAIL>", "password": "<PASSWORD>"}}, status=201
)
# Ask for a reset password.
resp = self.app.post_json(
"/accounts/<EMAIL>/reset-password",
{"data": {"email-context": {"name": "Alice"}}},
status=200,
)
assert resp.json["message"] == "A temporary reset password has been sent by mail"
assert len(self.mailer.outbox) == 2 # Validation email, reset password email.
mail = self.mailer.outbox[1] # Get the reset password email
assert mail.sender == "<EMAIL>"
assert mail.subject == "Alice, here is a temporary reset password for <EMAIL>"
assert (
mail.body
== f"You can use this temporary reset password {reset_password} to change your account <EMAIL> password"
)
# The reset password is stored in the cache.
cached_password = get_cached_reset_password(
"<EMAIL>", self.app.app.registry
).encode(encoding="utf-8")
pwd_str = reset_password.encode(encoding="utf-8")
assert bcrypt.checkpw(pwd_str, cached_password)
def test_fail_use_reset_password_bad_data(self):
validation_key = reset_password = "<PASSWORD>"
with mock.patch("uuid.uuid4", return_value=uuid.UUID(reset_password)):
# Create the user.
self.app.post_json(
"/accounts", {"data": {"id": "<EMAIL>", "password": "<PASSWORD>"}}, status=201
)
| |
hasattr(self.callbacks,'logger'):
return tcgSupport.fail(self.callbacks.logger,self.callbacks.devname,StatusCode,op=currentFuncName(1), msg=msg, status=status)
def getRange(self, rangeNo, auth, authAs=None):
'''
Reads a band from the drive.
Parameters:
rangeNo - the band to read
        auth - Default authority to use if authAs is None
Optional named parameters:
authAs - tuple of authority, credential, or AuthAs structure. Defaults to (Anybody)
Returns a Range object with values read. Attributes of this object are
reflected from the names as specified in the TCG specification.
Consult setRange named parameters for attribute definitions.
'''
status, rv, kwrv = self.invoke('Band%d' % rangeNo, 'Get',
authAs=self._getAuthAs(authAs, auth))
if status != StatusCode.Success:
return self.fail(rv, status)
str_kwrv = tcgSupport.convert(kwrv)
if len(str_kwrv) == 0:
return None, True
if self.SSC != 'Enterprise':
for key in list(locking_table.keys()):
str_kwrv[key] = str_kwrv[locking_table[key]]
for key in list(str_kwrv.keys()):
if not isinstance(key, str):
del str_kwrv[key]
str_kwrv['LockOnReset'] = 0 in str_kwrv['LockOnReset']
return SedObject(str_kwrv), True
def setRange(self, auth, rangeNo, authAs=None, **kwargs):
'''
        Modifies a band's fields. Support provided only for Enterprise and Opalv2.0
Parameters:
rangeNo - The band to modify. (required)
auth - Default auth in case if authAs is None
Optional named parameters:
authAs - Tuple of authority, credential, or AuthAs structure.
RangeStart - The starting LBA of the band.
RangeLength - The number of LBAs included in the band.
ReadLocked - Prohibit read access to the band (True) or allow read access to the band (False)
ReadLockEnabled - Enable (True) ReadLocked field for this band.
WriteLocked - Prohibit write access to the band (True) or allow write access to the band (False)
WriteLockEnabled- Enable (True) WriteLocked field for this band.
LockOnReset - Enable locks on power cycle (True) or do not modify locks on power cycle (False)
'''
for key, value in list(kwargs.items()):
if key == 'LockOnReset':
value = [0] if kwargs.get('LockOnReset') == str(True) else []
self.token.update({key:value})
arg = tcgSupport.tokens(self)
status, rv, kwrv = self.invoke('Band%d' % rangeNo, 'Set', arg,
authAs=self._getAuthAs(authAs, auth),
**self.token)
self.token.clear()
if status != StatusCode.Success:
return self.fail(rv, status)
return True
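    # Added usage sketch (illustrative; the authority and credential values are
    # assumptions, and `sed` stands for an instance of this class):
    #
    #   ok = sed.setRange('BandMaster1', 1,
    #                     authAs=('BandMaster1', 'password'),
    #                     ReadLockEnabled=True, WriteLockEnabled=True,
    #                     ReadLocked=True, WriteLocked=True)
    #   band, _ = sed.getRange(1, 'BandMaster1',
    #                          authAs=('BandMaster1', 'password'))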
def enable_range_access(self, objectId, user, auth, authAs=None):
'''
Provides band access to users. Opal 2.0 specific method.
Parameters:
objectId - Locking Range object value.
user - User to whom access needs to be provided.
        auth - Default authority to use if authAs is None.
Optional Parameters:
authAs - Tuple of authority, credential, or AuthAs structure.
'''
Userno = int(''.join(filter(str.isdigit, user)))
if Userno == 1:
User = baseObjectIds['User##']
else:
User = baseObjectIds['User##'] + Userno
status, rv, kwrv = self.invoke(objectId, 'Set', (1, [(3, [("\x00\x00\x0C\x05", struct.pack(">Q", User)), ("\x00\x00\x0C\x05", struct.pack(">Q", User)), ("\x00\x00\x04\x0E", 1)])]),
authAs=self._getAuthAs(authAs, auth),
noNamed=True,
useTls=True)
if status != StatusCode.Success:
return self.fail(rv, status)
return True
def get_MEK(self, rangeNo, auth, authAs=None):
'''
Obtain the Media Encryption Key (MEK) UID for the range from the Locking Table. Support provided only for Opal2.0.
Parameters:
rangeNo - The band number. (required)
auth - Default authority, used when authAs is None
Optional Parameters:
authAs - Tuple of authority, credential, or AuthAs structure
'''
status, rv, kwrv = self.invoke('Band%d' % rangeNo, 'Get', ([(3, 0x0A), (4, 0x0A)]),
authAs=self._getAuthAs(authAs, auth),
noNamed=True,
useTls=True)
if status != StatusCode.Success:
return self.fail(rv, status)
kwrv['K_AES_256_Range' + str(rangeNo) + '_Key_UID'] = kwrv.pop(list(kwrv.keys())[0])
return SedObject(kwrv), True
def erase(self, rangeNo, authAs=None):
'''
Erases a band. Support provided only for Enterprise.
Parameters:
rangeNo - the band to modify
Optional parameters:
authAs - tuple of authority, credential, or AuthAs structure.
'''
status, rv, kwrv = self.invoke('Band%d' % rangeNo, 'Erase',
authAs=self._getAuthAs(authAs, 'EraseMaster'),
noNamed=True)
if status != StatusCode.Success:
return self.fail(rv, status)
return True
def gen_key(self, range_key, auth, authAs=None):
'''
Performs a secure erase of the range. Support provided only for Opal2.0.
Parameters:
range_key - Key object value as a hexadecimal number
Optional parameters:
authAs - tuple of authority, credential, or AuthAs structure.
'''
status, rv, kwrv = self.invoke(range_key, 'GenKey',
authAs=self._getAuthAs(authAs, auth),
noClose=True,
noNamed=False,
useTls=True)
if status != StatusCode.Success:
return self.fail(rv, status)
return True
def changePIN(self, auth, pin, authAs=None, obj=None):
'''
Modify credentials for an authority. Support provided only for Enterprise and Opalv2.0
auth - An authority string or numeric value identifying the authority to modify.
pin - The new PIN to apply to this authority.
authAs - tuple of authority, credential, or AuthAs structure.
'''
obj = auth if obj is None else obj
self.token.update({'PIN':pin})
arg = tcgSupport.tokens(self)
status, rv, kwrv = self.invoke(obj, 'Set', arg,
authAs=self._getAuthAs(authAs, auth),
useTls=True,
**self.token)
self.token.clear()
if status != StatusCode.Success:
return self.fail(rv, status)
return True
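# Hedged sketch: rotate an authority's credential using checkPIN()/changePIN()
# above. 'Admin1' and the PIN values are placeholders, not a mandated workflow.
def _example_rotate_pin(sed, old_pin, new_pin):
    # Only attempt the change when the old credential still authenticates.
    if not sed.checkPIN('Admin1', old_pin):
        return False
    return sed.changePIN('Admin1', new_pin, authAs=('Admin1', old_pin))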
def checkPIN(self, auth, pin):
'''
Validate credentials for an authority. Support provided only for Enterprise and Opalv2.0
Parameters:
auth - An authority string or numeric value identifying the authority to modify.
pin - The PIN to validate. May be a string or an object with the attribute 'plainText'.
Returns True if successfully authenticated, False otherwise. Does not invoke fail method.
'''
return self._checkPIN(auth, pin)
def writeaccess(self, user, tableno, authAs=None):
'''
Provides DataStore Table write access to users. Opal 2.0 specific method.
Parameters:
user - User to whom access needs to be provided.
Optional Parameters:
authAs - Tuple of authority, credential, or AuthAs structure.
'''
if int(''.join(filter(str.isdigit, user))) == 1:
User = baseObjectIds['User##']
else:
User = baseObjectIds['User##'] + int(''.join(filter(str.isdigit, user)))
status, rv, kwrv = self.invoke('ACE_DataStore%d_Set_All' % tableno, 'Set',
(1, [(3, [("\x00\x00\x0C\x05", struct.pack(">Q", User))])]),
noNamed=True,
sp='LockingSP',
authAs=self._getAuthAs(authAs, 'Admin1'))
if status != StatusCode.Success:
return False
return True
def readaccess(self, user, tableno, authAs=None):
'''
Provides DataStore Table read access to users. Opal 2.0 specific method.
Parameters:
user - User to whom access needs to be provided.
Optional Parameters:
authAs - Tuple of authority, credential, or AuthAs structure.
'''
if int(''.join(filter(str.isdigit, user))) == 1:
User = baseObjectIds['User##']
else:
User = baseObjectIds['User##'] + int(''.join(filter(str.isdigit, user)))
status, rv, kwrv = self.invoke('ACE_DataStore%d_Get_All' % tableno, 'Set',
(1, [(3, [("\x00\x00\x0C\x05", struct.pack(">Q", User))])]),
noNamed=True,
sp='LockingSP',
authAs=self._getAuthAs(authAs, 'Admin1'))
if status != StatusCode.Success:
return False
return True
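# Hedged sketch: grant one user both read and write access to DataStore table 1
# through the writeaccess()/readaccess() methods above. 'User2', the table
# number and the Admin1 credential are placeholders.
def _example_grant_datastore_access(sed, admin_credential):
    auth = ('Admin1', admin_credential)
    return (sed.writeaccess('User2', 1, authAs=auth)
            and sed.readaccess('User2', 1, authAs=auth))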
def readData(self, auth, authAs=None):
'''
Read the SED DataStore. Data is available as the callback.dataStore attribute.
Support provided only for Enterprise.
Optional named parameters:
authAs - tuple of authority, credential, or AuthAs structure. Defaults to (Anybody).
Returns the DataStore object of non-volatile values, None when datastore is empty, False on error.
'''
authAs = self._getAuthAs(authAs, auth)
if self.checkPIN(authAs[0], self.mSID) == True:
authAs = (authAs[0], self.mSID)
if ''.join(re.split("[^a-zA-Z]+", auth)) == "User":
name_value = ([(0o1, 00), (0o2, self.data_length)])
else:
name_value = [('startRow', 0)]
status, rv, kwrv = self.invoke('DataStore', 'Get',
name_value,
sp='LockingSP',
authAs=authAs,
noNamed=True,
)
if status != StatusCode.Success:
return self.fail(rv, status)
elif len(rv) > 0:
self.callbacks.dataStore = tcgSupport.fromSerialized(rv[0])
return self.callbacks.dataStore
return None
def writeData(self, auth, data, authAs=None):
'''
Write the SED DataStore.
Optional named parameters:
authAs - tuple of authority, credential, or AuthAs structure. Defaults to (BandMaster0, mSID).
Needs to authenticate as any BandMaster or EraseMaster.
Returns True when data is written.
Returns False if data is invalid or data is not dirty.
'''
authAs = self._getAuthAs(authAs, auth)
if self.checkPIN(authAs[0], self.mSID) == True:
authAs = (authAs[0], self.mSID)
if ''.join(re.split("[^a-zA-Z]+", auth)) == "User":
name_value, s_data = (00, 00), (0o1, tcgSupport.serialize(data))
else:
name_value, s_data = [('startRow', 0)], tcgSupport.serialize(data)
status, rv, kwrv = self.invoke('DataStore', 'Set',
name_value,
s_data,
sp='LockingSP',
authAs=authAs,
noNamed=True,
)
if status != StatusCode.Success:
return self.fail(rv, status)
return True
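# Hedged sketch of a read-modify-write cycle on the DataStore using readData()
# and writeData() above. The EraseMaster authority and the stored key are
# placeholders; an empty DataStore comes back as None and is treated as {}.
def _example_update_datastore(sed, credential):
    import time
    auth = ('EraseMaster', credential)
    data = sed.readData('EraseMaster', authAs=auth)
    if data is False:        # read failed
        return False
    data = data or {}        # None means the DataStore was empty
    data['last_provisioned'] = time.time()
    return sed.writeData('EraseMaster', data, authAs=auth)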
def getPort(self, uid, authAs=None):
'''
Retrieve the port table for the specified port uid. Support provided only for Enterprise and Opalv2.0
Parameters:
uid - Port UID. Port UIDs are enumerable through the ports attribute.
Optional named parameters:
authAs - tuple of authority, credential, or AuthAs structure.
Returns a Port object with attributes reflected from the TCG object table fields.
Consult setPort named parameters for attribute definitions.
'''
status, rv, kwrv = self.invoke(uid, 'Get',
authAs=self._getAuthAs(authAs, 'SID')
)
if status != StatusCode.Success:
return self.fail(rv, status)
if len(kwrv) == 0:
return None
str_kwrv = tcgSupport.convert(kwrv)
if self.SSC != 'Enterprise':
for key, val in portlocking_table.items():
str_kwrv[key] = str_kwrv[portlocking_table[key]]
if 'LockOnReset' in str_kwrv:
str_kwrv['LockOnReset'] = 0 in str_kwrv['LockOnReset']
if 'PortLocked' in kwrv:
str_kwrv['PortLocked'] = bool(str_kwrv['PortLocked'])
if 'UID' in str_kwrv:
str_kwrv['UID'] = uid
return SedObject(str_kwrv)
def setPort(self, port, authAs=None, **kwargs):
'''
Set the locked
time_to_text = lambda t: (time2str(t,cad='%Y-%m-%d_%H:%M:%S')
+('%0.3f'%(t%1)).lstrip('0')) #taurustrend timestamp format
ml = min(len(v) for v in table.values())
for i in range(ml): #len(table.values()[0])):
csv+=sep.join([time_to_text(table.values()[0][i][0]),
str(table.values()[0][i][0])]
+[value_to_text(table[k][i][1]) for k in keys])
csv+=linesep
print('Text file generated in %d milliseconds'%(1000*(time.time()-start)))
return csv
def correlate_values(self,values,stop=None,resolution=None,debug=False,rule=None,MAX_VALUES=50000):
''' Correlates values to have all epochs in all columns
:param values: {curve_name:[values]}
:param resolution: two epochs with difference smaller than resolution will be considered equal
:param stop: an end date for correlation
:param rule: a method(tupleA,tupleB,epoch) like (min,max,median,average,last,etc...) that will take two last column (t,value) tuples and time and will return the tuple to keep
'''
start = time.time()
self.log.info('correlate_values(%d x %d,resolution=%s,MAX_VALUES=%d) started at %s'%(
len(values),max(len(v) for v in values.values()),resolution,MAX_VALUES,time.ctime(start)))
stop = stop or start
keys = sorted(values.keys())
table = dict((k,list()) for k in keys)
index = dict((k,0) for k in keys)
lasts = dict((k,(0,None)) for k in keys)
first,last = min([t[0][0] if t else 1e12 for t in values.values()]),max([t[-1][0] if t else 0 for t in values.values()])
if resolution is None:
#Avg: approximate time resolution of each row
avg = (last-first)/min((MAX_VALUES/6,max(len(v) for v in values.values()) or 1))
if avg < 10: resolution = 1
elif 10 <= avg<60: resolution = 10
elif 60 <= avg<600: resolution = 60
elif 600 <= avg<3600: resolution = 600
else: resolution = 3600 #defaults
self.log.info('correlate_values(...) resolution set to %2.3f -> %d s'%(avg,resolution))
assert resolution>.1, 'Resolution must be > 0.1'
if rule is None: rule = fn.partial(choose_first_value,tmin=-resolution*10)
#if rule is None: rule = fn.partial(choose_last_max_value,tmin=-resolution*10)
epochs = range(int(first-resolution),int(last+resolution),int(resolution))
for k,data in values.items():
self.log.info('Correlating %s->%s values from %s'%(len(data),len(epochs),k))
i,v,end = 0,data[0] if data else (first,None),data[-1][0] if data else (last,None)
for t in epochs:
v,tt = None,t+resolution
#Inserted value will be (<end of interval>,<correlated value>)
#The idea is that if there's a value in the interval, it is chosen
#If there's no value, then it will be generated using previous/next values
#If there's no next or previous then value will be None
#NOTE: Already tried a lot of optimization, reducing number of IFs doesn't improve
#Only could guess if iterating through values could be better than iterating times
if i<len(data):
for r in data[i:]:
if r[0]>(tt):
if v is None: #No value in the interval
if not table[k]: v = (t,None)
else: v = rule(*[table[k][-1],r,tt]) #Generating value from previous/next
break
#therefore, r[0]<=(t+resolution)
else: i,v = i+1,(t,r[1])
## A more elaborate selection (e.g. to maximize change)
#elif v is None:
#i,v = i+1,(t,r[1])
#else:
#i,v = i+1,rule(*[v,r,tt])
else: #Filling table with Nones
v = (t+resolution,None)
table[k].append((tt,v[1]))
self.log.info('\t%s values in table'%(len(table[k])))
self.log.info('Values correlated in %d milliseconds'%(1000*(time.time()-start)))
return table
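# Hedged, self-contained sketch of the bucketing idea used by correlate_values():
# every curve is resampled onto a common epoch grid, keeping the last value seen
# inside each interval and None when the interval is empty. It approximates the
# method above (no rule(), MAX_VALUES or logging) and is not a drop-in replacement.
def _correlate_sketch(values, resolution):
    first = min(v[0][0] for v in values.values() if v)
    last = max(v[-1][0] for v in values.values() if v)
    epochs = range(int(first - resolution), int(last + resolution), int(resolution))
    table = {}
    for name, data in values.items():
        row, i = [], 0
        for t in epochs:
            v = None
            # Consume every sample that falls inside (t, t+resolution].
            while i < len(data) and data[i][0] <= t + resolution:
                v = data[i][1]
                i += 1
            row.append((t + resolution, v))
        table[name] = row
    return table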
#################################################################################################
#################################################################################################
def get_extractor_values(self, attribute, start_date, stop_date, decimate, asHistoryBuffer, N=0):
""" Getting attribute values using Java Extractors """
self.log.info('Using Java Extractor ...')
try:
extractor = self.get_extractor(attribute=attribute)
#self.clean_extractor(extractor)
result = self.__extractorCommand(extractor,'GetAttDataBetweenDates',[attribute,start_date,stop_date])
vattr,vsize=str(result[1][0]),int(result[0][0])
time.sleep(0.2)
if vattr not in [a.name for a in extractor.attribute_list_query()]:
raise Exception,'%s_NotIn%sAttributeList'%(vattr,extractor.name())
self.log.debug( '\treading last value of attribute %s'%vattr)
last_value = extractor.read_attribute(vattr).value
self.log.debug('\treading %s attribute history values of %s (last_value = %s)'% (vsize,vattr,last_value))
history=extractor.attribute_history(vattr,vsize)
if N>0: history = history[-N:]
#DECIMATION IS DONE HERE ##########################################################################
if decimate:
nhist,l0 = [],len(history)
for i,h in enumerate(history):
#if not i or h.value!=history[i-1].value or ((i+1)<l0 and history[i+1].value!=h.value) or h.time.tv_sec>=(300+nhist[-1].time.tv_sec):
if not i or data_has_changed(h_to_tuple(h),h_to_tuple(history[i-1]),h_to_tuple(history[i+1]) if i+1<l0 else None):
nhist.append(h)
self.log.debug('\tIn get_attribute_values(%s,...).extractor: decimated repeated results ... %s -> %s'%(attribute,len(history),len(nhist)))
history = nhist
#Sorting extracted values
try: history=[v for t,u,v in sorted((h.time.tv_sec,h.time.tv_usec,h) for h in history)]
except Exception,e: self.log.error('Unable to sort history values: %s'%e)
self.clean_extractor(extractor,vattr)
self.attr_extracted[attribute]=(lambda s: s if ':' in s else self.tango_host+'/'+s)(extractor.name())
except Exception,e:
self.log.warning( traceback.format_exc())
raise Exception,'Archiving.Reader_ExtractorFailed(%s)!:%s' % (extractor.name(),str(e))
if int(PyTango.__version__.split('.')[0])>=7:
values = asHistoryBuffer and history or [(ctime2time(h.time),h.value) for h in history]
self.last_reads = history and (ctime2time(history[0].time),ctime2time(history[-1].time)) or (1e10,1e10)
else:
values = asHistoryBuffer and history or [(ctime2time(h.value.time),h.value.value) for h in history]
self.last_reads = history and (ctime2time(history[0].value.time),ctime2time(history[-1].value.time)) or (1e10,1e10)
return values
def clear_cache(self):
self.cache.clear()
for m in dir(self):
try:
m = getattr(self,m)
if fn.isCallable(m) and hasattr(m,'cache'):
m.cache.clear()
except:
traceback.print_exc()
def clean_extractor(self,extractor,vattr=None):
''' removing dynamic attributes from extractor devices ...'''
#self.log.debug('In PyTangoArchiving.Reader.__cleanExtractor(): removing dynamic attributes')
self.log.debug( 'In PyTangoArchiving.Reader.__cleanExtractor(): removing dynamic attributes')
self.log.debug( '%s(%s)'%(type(extractor),extractor) )
if hasattr(extractor,'dev_name'):
name,proxy=extractor.dev_name(),extractor
else:
name,proxy=str(extractor),self.servers.proxies[str(extractor)]
if vattr: proxy.RemoveDynamicAttribute(vattr)
else: proxy.RemoveDynamicAttributes()
#def __initMySQLconnection(self):
#try: self.db = MySQLdb.connect(db=self.db_name,host=self.host,user=self.user,passwd=<PASSWORD>)
#except Exception,e:
#self.log.error( 'Unable to create a MySQLdb connection to "%s"@%s.%s: %s'%(self.user,self.host,self.db_name,traceback.format_exc()))
#self.db = None
def __extractorCommand(self,extractor=None,command='',args=[]):
if not command: raise Exception,'Reader__extractorCommand:CommandArgumentRequired!'
if not extractor: extractor = self.get_extractor()
extractor.ping()
try:
self.log.debug( 'in damn Reader.__extractorCommand: calling HdbExtractor(%s).%s(%s)'%(extractor.name(),command,args))
result = extractor.command_inout(*([command]+(args and [args] or [])))
except PyTango.DevFailed, e:
#e.args[0]['reason'],e.args[0]['desc'],e.args[0]['origin']
reason = '__len__' in dir(e.args[0]) and e.args[0]['reason'] or e.args[0]
if 'Broken pipe' in str(reason):
extractor.init()
result = extractor.command_inout(*([command]+(args and [args] or [])))
elif 'MEMORY_ERROR' in str(reason):
#raise Exception,'Extractor_%s'%reason
self.clean_extractor(extractor.name())
extractor.init()
result = extractor.command_inout(*([command]+(args and [args] or [])))
else:
self.log.warning(traceback.format_exc())
raise Exception,'Reader__extractorCommand:Failed(%s)!'% str(e)
#self.log.debug( 'in Reader.__extractorCommand: command finished')
return result
#################################################################################################
# Multiprocess Class for Reader
#################################################################################################
class ReaderByBunches(Reader):
"""
Class that splits in bunches every query done against the database.
It allows only database queries; not extractor devices
It uses multiprocessing and threading to run queries in parallel
"""
DEFAULT_BUNCH_SIZE = 1000
def init_buncher(self):
self._process_event,self._threading_event,self._command_event = multiprocessing.Event(),threading.Event(),threading.Event()
self._receiver = threading.Thread(target=self._receive_data)
self._receiver.daemon = True #Therefore, keep_alive routines should not be needed!
self._last_alive = time.time()
self.callbacks = defaultdict(list)
def __del__(self):
self.stop()
type(self).__base__.__del__(self)
def start(self):
self._reader.start()
self._receiver.start()
self.get_attributes()
def stop(self):
self.log.info('ReaderProces().stop()')
self._process_event.set(),self._threading_event.set()
self._pipe1.close(),self._pipe2.close()
def alive(self):
if not self._reader.is_alive():
raise Exception('ReaderProcess is not Alive!!! (last contact at %s)'%time.ctime(self._last_alive))
self._last_alive = time.time()
return self._last_alive
# Protected methods
@staticmethod
def get_key(d):
return str(sorted(d.items()))
def _receive_data(self):
"""
        This receiver thread is the one that really processes the data.
        For each received result it first checks the cache; if the query is contained in a cached array, it just returns it.
        Then, for each query, it splits the work in bunches and, with each result, launches the callbacks and updates the cache.
        Finally, it proceeds with the next queried value.
"""
while not self._threading_event.is_set():
try:
#self.log.info('... ReaderThread: Polling ...')
assert self._reader.is_alive()
if self._pipe1.poll(0.1):
#self.log.info('... ReaderThread: Receiving ... (%s)'%(ReaderProcess.__instances.keys()))
key,query = self._pipe1.recv()
if key.lower() in self.asked_attributes:
#Updating last_dates dictionary
self.debug('... ReaderThread: Received %s last_dates %s'%(key,query))
self.last_dates[key] = query
else:
self.log.info('... ReaderThread: Received data = %s [%s]; %s queries pending'%(
key,isSequence(query) and len(query) or type(query),len(self.callbacks)))
for callback in self.callbacks[key]:
try:
self.debug('\tlaunching callback %s'%callback)
callback(query)
except:
self.warning('\tError in %s callback %s!'%(key,callback))
self.warning(traceback.format_exc())
self.callbacks.pop(key)
except Exception,e:
self.warning('\tError in thread!\n%s'%(traceback.format_exc()))
self._threading_event.wait(0.1)
self.log.info('Exiting PyTangoArchiving.ReaderProcess()._receive_data thread')
def _send_query(self,key,query,callback):
assert self.alive()
if key not in self.callbacks:
self.callbacks[key] = [callback]
self._pipe1.send((key,query))
elif callback not in self.callbacks[key]:
self.callbacks[key].append(callback)
return
def get_attribute_values(self,attribute,callback,start_date,stop_date=None,
asHistoryBuffer=False,decimate=False,notNone=False,N=0):
"""This method should be capable of>
- cut queries in pieces,
- execute a callback for each of
- but, keep the complete query in cache (for reading arrays)
This method reads values for an attribute between specified dates.
This method may use MySQL queries or an H/TdbExtractor DeviceServer to get the values from the database.
The format of values returned is [(epoch,value),]
The flag 'asHistoryBuffer' forces to return the rawHistBuffer returned by the DS.
        :param attribute: attribute name to read
:param start_date: timestamp of the first value
:param stop_date: timestamp of the last value
:param asHistoryBuffer: return a history buffer object instead of a list (for trends)
:param N: if N>0, only the last N values will be returned
:param decimate: remove repeated values, False by default but True when called from trends
:return: a list with values (History or tuple values depending of args)
"""
#Previous implementation was discarded due to this exception
#raise Exception("MySQLdb.SSCursor.fetchmany failed due to (2013, 'Lost connection to MySQL server during query')")
assert self.alive()
"""
This method will just put the query in the queue
"""
decimate,window = decimate if isSequence(decimate) else (decimate,'0')
if callable(decimate): decimate = decimate.__module__+'.'+decimate.__name__
query = {'attribute':attribute,'start_date':start_date,'stop_date':stop_date,'asHistoryBuffer':asHistoryBuffer,'decimate':(decimate,window),'N':N}
assert hasattr(callback,'__call__'),'2nd argument must be callable'
self.asked_attributes.append(attribute.lower())
key = self.get_key(query)
self.log.info('thread.send_query(%s)'%key)
self._send_query(key,query,callback)
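# Hedged usage sketch for the bunched reader above, assuming the reader process
# has already been set up and started (init_buncher()/start()). The attribute
# name and the one-hour window are placeholders; the callback just reports how
# many (epoch, value) pairs arrived.
def _example_bunched_read(reader):
    import time
    def on_values(values):
        print('received %d points' % len(values))
    end = time.time()
    reader.get_attribute_values('sys/tg_test/1/double_scalar', on_values,
                                start_date=end - 3600, stop_date=end)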
def get_attributes_values(self,attributes,callback,start_date,stop_date=None,
correlate=False,asHistoryBuffer=False,trace = False, text = False, N=0
):
"""
        Works like
idx):
input = self.tensor[idx]
target = self.classes[idx]
return input, target
def __len__(self):
return len(self.tensor)
class MultiTransformDataset(data.Dataset):
def __init__(self, dataset, t):
self.dataset = dataset
self.transform = transforms.Compose(
[transforms.ToPILImage()] +
t)
def __getitem__(self, idx):
input, target = self.dataset[idx]
return input, self.transform(input), target
def __len__(self):
return len(self.dataset)
class QuickDrawDataset(data.Dataset):
def __init__(self, root, classes, transform):
self.classes = classes
self.labels = torch.arange(len(classes))
self.transform = transform
self.qdd = QuickDrawData(recognized=True, max_drawings=10000, cache_dir=root)
self.qdd.load_drawings(classes)
def __getitem__(self, idx):
c = self.classes[idx%len(self.classes)]
label = self.labels[idx%len(self.classes)]
img = self.qdd.get_drawing(c).image
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
return 10000
#return len(self.classes)
class USPS(data.Dataset):
"""`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset.
The data-format is : [label [index:value ]*256 \n] * num_lines, where ``label`` lies in ``[1, 10]``.
The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]``
and make pixel values in ``[0, 255]``.
Args:
root (string): Root directory of dataset to store``USPS`` data files.
split (string): One of {'train', 'test'}.
Accordingly dataset is selected.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
'train': [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2",
"usps.bz2", 7291
],
'test': [
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2",
"usps.t.bz2", 2007
],
}
def __init__(self,
root,
split='train',
transform=None,
target_transform=None,
download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.split = split # training set or test set
if self.split not in self.split_list:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="test"')
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.total_images = self.split_list[split][2]
full_path = os.path.join(self.root, self.filename)
if download and not os.path.exists(full_path):
self.download()
import bz2
fp = bz2.open(full_path)
datas = []
targets = []
for line in tqdm(
fp, desc='processing data', total=self.total_images):
label, *pixels = line.decode().split()
pixels = [float(x.split(':')[-1]) for x in pixels]
im = np.asarray(pixels).reshape((16, 16))
im = (im + 1) / 2 * 255
im = im.astype(dtype=np.uint8)
datas.append(im)
targets.append(int(label) - 1)
assert len(targets) == self.total_images, \
'total number of images are wrong! maybe the download is corrupted?'
self.data = np.stack(datas, axis=0)
self.targets = targets
self.labels = list(range(10))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img, mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
def download(self):
download_url(self.url, self.root, self.filename, md5=None)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Split: {}\n'.format(self.split)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(
tmp,
self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(
tmp,
self.target_transform.__repr__().replace('\n',
'\n' + ' ' * len(tmp)))
return fmt_str
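# Hedged usage sketch for the USPS dataset above: wrap it in a DataLoader with a
# plain ToTensor transform. The root path and batch size are placeholders.
def _example_usps_loader(root='./data/usps'):
    from torch.utils.data import DataLoader
    from torchvision import transforms
    ds = USPS(root, split='train', transform=transforms.ToTensor(), download=True)
    return DataLoader(ds, batch_size=64, shuffle=True)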
class MNISTM(data.Dataset):
"""`MNIST-M Dataset."""
url = "https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz"
raw_folder = "raw"
processed_folder = "processed"
training_file = "mnist_m_train.pt"
test_file = "mnist_m_test.pt"
def __init__(self, root, mnist_root="data", train=True, transform=None, target_transform=None, download=False):
"""Init MNIST-M dataset."""
super(MNISTM, self).__init__()
self.root = os.path.expanduser(root)
self.mnist_root = os.path.expanduser(mnist_root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found." + " You can use download=True to download it")
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file)
)
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file)
)
def __getitem__(self, index):
"""Get images and target for data loader.
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.squeeze().numpy(), mode="RGB")
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
"""Return size of dataset."""
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and os.path.exists(
os.path.join(self.root, self.processed_folder, self.test_file)
)
def download(self):
"""Download the MNIST data."""
# import essential packages
from six.moves import urllib
import gzip
import pickle
from torchvision import datasets
# check if dataset already exists
if self._check_exists():
return
# make data dirs
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
# download pkl files
print("Downloading " + self.url)
filename = self.url.rpartition("/")[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
if not os.path.exists(file_path.replace(".gz", "")):
data = urllib.request.urlopen(self.url)
with open(file_path, "wb") as f:
f.write(data.read())
with open(file_path.replace(".gz", ""), "wb") as out_f, gzip.GzipFile(file_path) as zip_f:
out_f.write(zip_f.read())
os.unlink(file_path)
# process and save as torch files
print("Processing...")
# load MNIST-M images from pkl file
with open(file_path.replace(".gz", ""), "rb") as f:
mnist_m_data = pickle.load(f, encoding="bytes")
mnist_m_train_data = torch.ByteTensor(mnist_m_data[b"train"])
mnist_m_test_data = torch.ByteTensor(mnist_m_data[b"test"])
# get MNIST labels
mnist_train_labels = datasets.MNIST(root=self.mnist_root, train=True, download=True).train_labels
mnist_test_labels = datasets.MNIST(root=self.mnist_root, train=False, download=True).test_labels
# save MNIST-M dataset
training_set = (mnist_m_train_data, mnist_train_labels)
test_set = (mnist_m_test_data, mnist_test_labels)
with open(os.path.join(self.root, self.processed_folder, self.training_file), "wb") as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self.test_file), "wb") as f:
torch.save(test_set, f)
print("Done!")
class BSVHN(data.Dataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which
expect the class labels to be in the range `[0, C-1]`
Args:
root (string): Root directory of dataset where directory
``SVHN`` exists.
split (string): One of {'train', 'test', 'extra'}.
Accordingly dataset is selected. 'extra' is Extra training set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, split='train',
transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.split = split # training set or test set or extra set
if self.split not in self.split_list:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="extra" or split="test"')
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat['y'].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.labels_idx = [np.where(self.labels == i)[0] for i in range(10)]
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
np.random.seed()
label = np.random.randint(0, 10)
idxes = self.labels_idx[label]
index = np.random.choice(idxes)
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
        img = Image.fromarray(np.transpose(img,
actions for a set of members.
    These ACL classes can then be bundled and attached to a base using
the ``ACLSet`` class.
======== ================
Type Description
======== ================
    Scope    The scope specifies where the ACL is valid, e.g. ONE-level, all SUB-levels or RESET previous ACLs
    Members  A list of users this acl is valid for.
    Role     Instead of actions you can also refer to an ACLRole object.
Actions You can have multiple actions, where one action is described by ``a topic``, a ``set of acls`` and additional ``options`` that have to be checked while ACLs are resolved.
======== ================
>>> # Create an ACLSet object
>>> aclset = ACLSet()
>>> # Create an ACL object and attach it to the ACLSet
>>> acl = ACL()
>>> acl.set_priority(0)
>>> acl.set_members([u"user1", u"user2"])
>>> acl.add_action('^org\.clacks\.event\.ClientLeave$', 'rwx')
>>> aclset.add(acl)
>>> # Now add the set to the resolver
>>> resolver = ACLResolver()
>>> resolver.add_acl_set(aclset)
>>> # You can now check for acls, both should return True now.
>>> resolver.check('user1', 'org.clacks.event.ClientLeave', 'r')
>>> resolver.check('user1', 'org.clacks.event.ClientLeave', 'rwx')
ACL members can also contain regular expressions, like this:
>>> acl.set_members([u"user1", u"^user[0-9]*$"])
>>> ...
>>> resolver.check('user45', 'org.clacks.event.ClientLeave', 'r')
"""
priority = None
ONE = 1
SUB = 2
PSUB = 3
RESET = 4
members = None
actions = None
scope = None
uses_role = False
role = None
id = None
def __init__(self, scope=SUB, role=None):
self.env = Environment.getInstance()
self.log = logging.getLogger(__name__)
self.actions = []
self.members = []
r = PluginRegistry.getInstance("ACLResolver")
self.id = r.get_next_acl_id()
        # Is this a role-based or a manually configured ACL object?
if role:
self.use_role(role)
else:
if scope not in (ACL.ONE, ACL.SUB, ACL.PSUB, ACL.RESET):
raise TypeError(C.make_error("ACL_INVALID_SCOPE", scope=scope))
self.set_scope(scope)
def use_role(self, rolename):
"""
Mark this ACL to use a role instead of direct permission settings.
============== =============
Key Description
============== =============
rolename The name of the role to use.
============== =============
"""
if not isinstance(rolename, str):
raise ACLException(C.make_error("ATTRIBUTE_INVALID", "rolename", str.__name__))
r = PluginRegistry.getInstance("ACLResolver")
if rolename in r.acl_roles:
self.uses_role = True
self.role = rolename
else:
raise ACLException(C.make_error("ROLE_NOT_FOUND", role=rolename))
def set_scope(self, scope):
"""
This methods updates the ACLs scope level.
See :class:`clacks.agent.acl.ACL` for details on the scope-levels.
============== =============
Key Description
============== =============
        scope          The new scope value for this ACL.
============== =============
"""
if scope not in [ACL.ONE, ACL.SUB, ACL.PSUB, ACL.RESET]:
raise TypeError(C.make_error("ACL_INVALID_SCOPE", scope=scope))
if self.uses_role:
raise ACLException(C.make_error("ACL_INVALID_SCOPE_TARGET"))
self.scope = scope
def set_priority(self, priority):
"""
Sets the priority of this ACL object. Lower values mean higher priority.
        If no priority is given, a priority of 0 will be used when this ACL gets added to an ACLSet, the next will get 1, then 2 and so on.
============== =============
Key Description
============== =============
        priority       The new priority value for this ACL.
============== =============
Example::
aclset = ACLSet()
acl = ACL(scope=ACL.ONE)
acl.set_members([u'tester1', u'tester2'])
acl.add_action('^org\.clacks\.event\.ClientLeave$', 'rwx')
acl.set_priority(100)
"""
self.priority = priority
def set_members(self, members):
"""
Set the members for this acl
============== =============
Key Description
============== =============
members A list of user names
============== =============
Example::
aclset = ACLSet()
acl = ACL(scope=ACL.ONE)
acl.set_members([u'peter', u'klaus'])
"""
if type(members) != list:
raise ACLException(C.make_error("ATTRIBUTE_INVALID", "members", list.__name__))
self.members = members
def clear_actions(self):
"""
This method removes all defined actions from this acl.
"""
self.role = None
self.uses_role = False
self.actions = []
def add_action(self, topic, acls, options=None):
"""
Adds a new action to this ACL object.
============== =============
Key Description
============== =============
topic The topic action we want to create ACLs for. E.g. 'org.clacks.factory.Person'
acls The acls this action contain. E.g. 'rwcdm'.
options Special additional options that have to be checked.
============== =============
.. _topic_description:
**Topic**
Topics are defined as regular expressions, which gives a huge flexibility.
For example ``^clacks\.[^\.]*\.factory$`` would match for:
* clacks.test.factory
* clacks.hallo.factory
but not for:
* clacks.factory
* clacks.level1.level2.factory
Where ``^clacks\..*\.factory$`` matches for:
* clacks.factory
* clacks.level1.factory
* clacks.level1.level2.factory
.. _acls_description:
**Acls**
        The acls parameter describes the actions we can perform on a given ``topic``.
Possible actions are:
* r - Read
* w - Write
* c - Create
* d - Delete
* o - Owner only, this acl affects only logged in user itself.
* m - Manager, this acl applies for the manager of on object.
* s - Search - or being found
* x - Execute
* e - Receive event
The actions have to passed as a string, which contains all actions at once::
>>> add_action(``topic``, "rwcdm", ``options``)
.. _options_description:
**Options**
Options are additional check parameters that have to be fulfilled to get this acl to match.
The ``options`` parameter is a dictionary which contains a key and a value for each additional option we want to check for, e.g. ::
>>> add_action('topic', 'acls', {'uid': 'hanspeter', 'ou': 'technik'})
If you've got a user object (``user1``) as dictionary, then you can check permissions like this::
>>> resolver.check('some.topic', 'rwcdm', user1)
The resolver will then check if the keys ``uid`` and ``ou`` are present in the user1 dictionary and then check if the values match.
If not all options match, the ACL will not match.
"""
if options and type(options) != dict:
raise ACLException(C.make_error("ATTRIBUTE_INVALID", "options", dict.__name__))
if self.uses_role and self.role:
raise ACLException(C.make_error("ACL_TYPE_MISMATCH"))
        # Check that the given acls only contain the allowed characters 'rwcdsexom'
if not all(map(lambda x: x in 'rwcdsexom', acls)):
raise ACLException(C.make_error("ACL_STRING_INVALID"))
acl = {
'topic': topic,
'acls': acls,
'options': options if options else {}}
self.actions.append(acl)
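# Hedged sketch following the docstring above: an ACL that lets matching users
# read and search Person objects, restricted by an additional 'ou' option. It
# assumes the ACLResolver plugin is registered (required by ACL.__init__); the
# topic, member pattern and option values are illustrative only.
def _example_person_acl():
    acl = ACL(scope=ACL.SUB)
    acl.set_members([u'^tech_user[0-9]+$'])
    acl.add_action('^org\.clacks\.factory\.Person$', 'rs', {'ou': 'technik'})
    acl.set_priority(10)
    return acl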
def get_members(self):
"""
Returns the list of members this ACL is valid for.
"""
return self.members
def __str__(self):
return self.repr_self()
def repr_self(self, indent=0):
"""
Generates a human readable representation of the ACL-object.
"""
if self.uses_role:
r = PluginRegistry.getInstance("ACLResolver")
rstr = "\n%s<ACL> %s" % (" " * indent, str(self.members))
rstr += "\n%s" % r.acl_roles[self.role].repr_self(indent + 1)
else:
rstr = "\n%s<ACL scope(%s)> %s: " % ((" " * indent), self.scope, str(self.members))
for entry in self.actions:
rstr += "\n%s%s:%s %s" % ((" " * (indent + 1)), entry['topic'], str(entry['acls']), str(entry['options']))
return rstr
def __getManagerUids(self, dn):
"""
Returns a list with all uids that can manage the given dn.
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn, 'manager': {'$ne': [], '$exists': True}}, {'manager': 1})
if res.count():
uids = []
for item in res:
for manager_dn in item['manager']:
p_uid = self.__getUidByDn(manager_dn)
if p_uid:
uids.append(p_uid)
return uids
else:
return None
def __getUidByDn(self, dn):
"""
Returns the uid for a given dn.
"""
index = PluginRegistry.getInstance("ObjectIndex")
res = index.search({'dn': dn, '_type': 'User'}, {'uid': 1})
if res.count() == 1:
return res[0]['uid'][0]
elif res.count() > 1:
raise ValueError(C.make_error("ENTRY_NOT_UNIQUE", dn=dn))
else:
return None
def match(self, user, topic, acls, targetBase, options=None, skip_user_check=False, used_roles=None, override_users=None):
"""
Check if this ``ACL`` object matches the given criteria.
.. warning::
Do NOT use this to validate permissions. Use ACLResolver->check() instead
=============== =============
Key Description
=============== =============
user The user we want to check for. E.g. 'hans'
topic The topic action we want to check for. E.g. 'org.clacks.factory'
acls A string containing the acls we want to check for.
options Special additional options that have to be checked.
skip_user_check Skips checks for users, this is required to resolve roles.
used_roles A list of roles used in this recursion, to be able to check for endless-recursions.
        override_users  If an acl uses a role, then the original user list will be passed to the role's match method
                        to ensure that we can match for the correct list of users.
        targetBase      The object that was initially checked for (DN)
=============== =============
"""
# Initialize list of already used roles, to avoid recursions
if not used_roles:
used_roles = []
if self.uses_role:
# Roles do not have users themselves, so we need to check
# for the original set of users.
override_users = self.members
# Check for recursions while resolving the acls.
if self.role in used_roles:
raise ACLException(C.make_error("ACL_LOOP", role=self.role))
# Resolve acls used in the role.
used_roles.append(self.role)
r = PluginRegistry.getInstance("ACLResolver")
self.log.debug("checking ACL role entries for role: %s" % self.role)
for acl in r.acl_roles[self.role]:
(match, | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.263641,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.77863,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0470729,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.239662,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.291835,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.188072,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.303354,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.153123,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.644548,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.170357,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.64886,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0551338,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00788859,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0731378,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.058341,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.128272,
'Execution Unit/Register Files/Runtime Dynamic': 0.0662296,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.165852,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.395951,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.75177,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00120338,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00120338,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00106476,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000421276,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000838073,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00430959,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0109441,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0560847,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.56746,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.182412,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.190489,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.95912,
'Instruction Fetch Unit/Runtime Dynamic': 0.44424,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.024622,
'L2/Runtime Dynamic': 0.00956625,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.52169,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.639103,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.041559,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.041559,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.71794,
'Load Store Unit/Runtime Dynamic': 0.885617,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.102477,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.204955,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0363696,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0367381,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.221812,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0299073,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.440397,
'Memory Management Unit/Runtime Dynamic': 0.0666454,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.3804,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.145032,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0102503,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0932487,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
raise TypeError( # pragma: no cover
"Unexpected type %r." % type(op))
def mark_last_node(self):
"""
Marks the last node as the final output.
"""
if self.last_added_op is None:
raise RuntimeError("last_added_op is None.") # pragma: no cover
self.mark(-1, self.last_added_op)
def mark(self, i, op):
"""
Marks one input or result as an intermediate result
after a full einsum step.
:param op: integer (an input) or an instance of @see cl EinsumSubOp.
"""
if not isinstance(i, int):
raise TypeError( # pragma: no cover
"i must an integer not %r." % type(i))
if i != -1 and i not in self._inputs:
raise RuntimeError( # pragma: no cover
"Input %d was not registered in %r." % (i, self._inputs))
if isinstance(op, EinsumSubOp):
if id(op) not in self._nodes:
raise RuntimeError( # pragma: no cover
"Key %d not found, op=%r." % (id(op), op))
self._mark[i] = op
self._mark[id(op)] = i
self.last_op = op
else:
raise TypeError( # pragma: no cover
"Unexpected type %r." % type(i))
def __iter__(self):
"Iterates on nodes."
for op in self._ops:
yield op
def to_dot(self, **kwargs):
"""
Produces a graph in :epkg:`dot`.
:param kwargs: additional graph option
:return: string
"""
options = {
'orientation': 'portrait',
'ranksep': '0.25',
'nodesep': '0.05',
'width': '0.5',
'height': '0.1',
'size': '5',
'node': '[shape=record]',
}
options.update(kwargs)
def d2s(d):
it = []
for k, v in sorted(d.items()):
it.append("%s=%s" % (k, v))
return " ".join(it)
def d2sd(d):
it = []
for k, v in sorted(d.items()):
if len(v) > 1:
it.append("%s=%s" % (k, ",".join(map(str, v))))
return " ".join(it)
rows = ["digraph{"]
for k, v in options.items():
if isinstance(v, str) and "[" in v:
rows.append("{} {};".format(k, v))
else:
rows.append("{}={};".format(k, v))
for k, v in self._nodes.items():
if isinstance(v, int):
let = [(r, self.metadata['letters'][i])
for i, r in enumerate(self.metadata['mat0'][v])
if r != -1]
dup = self.metadata['duplicates'][v]
if dup is None:
dup = ""
else:
dup = " - %s" % d2sd(dup)
let.sort()
letters = "".join(_[1] for _ in let)
lab = "input %d\\\\n%s\\\\n%s%s" % (
v, letters, str(self.metadata['mat0'][v]), dup)
sk = v
extended_lab = ""
else:
lab = "%s\\\\n%s" % (v.name, d2s(v.kwargs))
sk = id(v)
extended_lab = v.dot_label()
if extended_lab:
extended_lab = "\\\\n" + extended_lab
if sk in self._mark and isinstance(self._mark[sk], int):
la = self._mark[sk]
lab = lab.replace("\\\\n", " - I%d\\\\n" % la)
s = ('%d [label="%s%s" style=filled '
'fillcolor=red];' % (k, lab, extended_lab))
else:
s = '%d [label="%s%s"];' % (k, lab, extended_lab)
rows.append(s)
if not hasattr(v, 'inputs'):
continue
for i in v.inputs:
vid = i if isinstance(i, int) else id(i)
s = "%d -> %d;" % (vid, k)
rows.append(s)
rows.append("}")
return "\n".join(rows)
def apply_sequence(self, *inputs, verbose=False, **kwargs):
"""
Applies a sequence of operations on a list of inputs.
:param inputs: input arrays, one per registered graph input
:param verbose: prints out intermediate results
:param kwargs: additional parameters,
see :meth:`apply
<mlprodict.testing.einsum.einsum_impl_classes.EinsumSubOp.apply>`.
:return: output
"""
if verbose:
print('######### apply_sequence')
data = {i: inp for i, inp in enumerate(inputs)}
last = None
for op in self:
last = op.apply(data, verbose=verbose, **kwargs)
if last is None:
raise RuntimeError( # pragma: no cover
"Sequence of operations is empty.")
return last
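# Hedged usage sketch: assuming the graph was built by the package's
# decompose_einsum_equation helper (not shown in this excerpt), apply_sequence
# evaluates the whole decomposition on concrete numpy arrays.
#
#     import numpy
#     m1 = numpy.random.rand(2, 3)
#     m2 = numpy.random.rand(3, 4)
#     # graph = decompose_einsum_equation("ij,jk->ik", m1.shape, m2.shape)
#     # res = graph.apply_sequence(m1, m2)  # equivalent to numpy.einsum("ij,jk->ik", m1, m2)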
def clean_unused_nodes(self, verbose=False):
"""
Cleans nodes with unused outputs.
:param verbose: display intermediate information
"""
def iteration(it):
# Walks through all nodes.
is_used = {}
for node in self._ops:
if not isinstance(node, EinsumSubOp):
continue
if id(node) not in is_used:
is_used[id(node)] = []
for inp in node.inputs:
if not isinstance(inp, EinsumSubOp):
continue
idn = id(inp)
if idn not in is_used:
is_used[idn] = []
is_used[idn].append(id(node))
# Remove unused nodes.
removed = []
for k, v in is_used.items():
if len(v) == 0:
removed.append(k)
removed = set(removed)
i_rem = []
for i, op in enumerate(self._ops):
if not isinstance(op, EinsumSubOp):
continue
if id(op) in removed and id(op) not in self._mark:
i_rem.append((i, id(op)))
for i, idn in reversed(i_rem):
if verbose:
print("[GraphEinsumSubOp.clean_nodes] remove node "
"i=%d: %d - id=%d" % (it, i, idn))
del self._ops[i]
del self._nodes[idn]
return len(i_rem) > 0
it = 1
while iteration(it):
it += 1
self.last_op = None
self.last_added_op = None
def simplify_mm_nodes(self, verbose=False):
"""
Node names suffixed by `mm` are an artifact used to keep
the graph consistent while building it. They can
now be replaced by the equivalent node without the `mm` suffix.
:param verbose: display intermediate information
"""
for op in self:
if not isinstance(op, EinsumSubOp):
continue
if op.name.endswith('_mm'):
if verbose:
print("[GraphEinsumSubOp.simplify_mm_nodes] node %r"
" - id=%d" % (op.name, id(op)))
if len(op.inputs) != 2:
raise RuntimeError( # pragma: no cover
"Expecting 2 inputs for node %r not %r id=%r." % (
op.name, len(op.inputs), id(op)))
op.name = op.name[:-3]
op.inputs = op.inputs[:1]
def _get_forward_nodes(self):
"""
Returns the forward nodes.
"""
forward = {}
for op in self:
if isinstance(op, int):
continue
for inp in op.inputs:
key = inp if isinstance(inp, int) else id(inp)
if key in forward:
forward[key].append(op)
else:
forward[key] = [op]
return forward
def _pprint_forward(self):
rows = []
for op in self:
line = "%r <- %s(%s)" % (
id(op), op.name,
", ".join(map(str, [id(_) for _ in op.inputs])))
rows.append(line)
return "\n".join(rows)
def _replace_node_sequence(self, added, deleted):
"""
Replaces a sequence of nodes by `added`, or simply removes the sequence
when `added` is None. The method does not check that the graph remains consistent.
"""
forward = self._get_forward_nodes()
key = id(deleted[-1])
if key not in forward:
raise RuntimeError( # pragma: no cover
"Key {} missing in all forward nodes (other keys {}), "
"all keys:\n{}".format(
key, [id(_) for _ in deleted],
self._pprint_forward()))
# deletion
mark_input = None
for d in deleted:
del self._nodes[id(d)]
if id(d) in self._mark:
del self._mark[id(d)]
dels = []
for k, v in self._mark.items():
if id(v) == id(d):
mark_input = k
dels.append(k)
if len(dels) != 1:
raise RuntimeError( # pragma: no cover
"Input %d has more than one marked operator "
"(%r)." % (id(d), dels))
del self._mark[dels[0]]
dels = set(id(o) for o in deleted)
rem = []
for i, op in enumerate(self._ops):
if id(op) in dels:
rem.append(i)
if len(rem) != len(deleted):
raise RuntimeError( # pragma: no cover
"Mismatched length %r, %r, len=%r." % (
rem, dels, len(deleted)))
for i in reversed(rem):
del self._ops[i]
self.last_added_op = None
# insertion
if added is not None:
self._ops.insert(rem[0], added)
self._nodes[id(added)] = added
for op in forward[key]:
new_inputs = list(op.inputs)
for i in range(len(op.inputs)): # pylint: disable=C0200
if id(op.inputs[i]) == key:
new_inputs[i] = added
op.inputs = tuple(new_inputs)
if mark_input is not None:
self.mark(mark_input, added)
else:
inps = deleted[0].inputs
if len(inps) != 1:
raise RuntimeError( # pragma: no cover
"More than one input. Call another method.")
inp = inps[0]
for op in forward[key]:
new_inputs = list(op.inputs)
for i in range(len(op.inputs)): # pylint: disable=C0200
if id(op.inputs[i]) == key:
new_inputs[i] = inp
op.inputs = tuple(new_inputs)
if mark_input is not None:
self.mark(mark_input, inp)
def remove_duplicate_transpose(self, verbose=False):
"""
Removes consecutive transposes by merging them.
:param verbose: display intermediate information
"""
modif = 1
while modif > 0:
modif = 0
candidates = []
forward = self._get_forward_nodes()
for op in self:
if op.name == "transpose":
inp = op.inputs[0]
if (isinstance(inp, EinsumSubOp) and
inp.name == 'transpose' and
len(forward[id(inp)]) == 1):
candidates.append(op)
if len(candidates) > 0:
modif = 1
# Not efficient to take the first one and to
# start again but the graph should not be too big.
cand = candidates[0]
op2 = cand
op1 = cand.inputs[0]
perm1 = op1.kwargs['perm']
perm2 = op2.kwargs['perm']
if len(perm1) != len(perm2):
raise RuntimeError( # pragma: no cover
"Transposition should have the same length "
"%r, %r." % (perm1, perm2))
perm = list(perm1)
for i in range(len(perm)): # pylint: disable=C0200
perm[i] = perm1[perm2[i]]
if list(range(len(perm))) == perm:
# identity, everything needs to be removed
new_op = None
else:
new_op = op2.__class__(
op2.full_dim, op2.name, op1.inputs[0],
perm=tuple(perm))
self._replace_node_sequence(new_op, [op1, op2])
if verbose:
print("[GraphEinsumSubOp.remove_duplicate_transpose] remove nodes %r"
" - id=%d,%d + %d perm1=%r perm2=%r -> perm=%r" % (
op2.name, id(op1), id(op2),
id(new_op) if new_op is not None else -1,
perm1, perm2, perm))
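# Worked example of the composition above (perm[i] = perm1[perm2[i]]): with
# perm1 = (1, 0, 2) and perm2 = (0, 2, 1), the merged permutation is
# (perm1[0], perm1[2], perm1[1]) = (1, 2, 0), which matches
# x.transpose(1, 0, 2).transpose(0, 2, 1) == x.transpose(1, 2, 0) for any 3-D array x.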
def to_onnx(self, output, *inputs, dtype=None, verbose=False,
opset=None, **kwargs):
"""
Converts the graph into ONNX.
:param output: output name
:param inputs: input names
:param dtype: type used for all operators
:param
<filename>gymV02/qcore/utils/handle_data.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Implement the classes required to handle orders and the access to information
related to books and candles subscribed by an agent
@author: ucaiado
Created on 07/05/2018
'''
import os
import yaml
from collections import namedtuple
import numpy as np
from neutrinogym.qcore import PROD
if not PROD:
from neutrinogym import neutrino
from neutrinogym.neutrino import fx
else:
import neutrino
from neutrino import fx
from .neutrino_utils import (get_begin_time, SymbolSubscriptionError)
from .handle_orders import Instrument
import pdb;
'''
Begin help functions
'''
s_trade_info = 'price qty buyer seller time agr trade_id'
TradeInfo = namedtuple('TradeInfo', s_trade_info)
OrderLevelInfo = namedtuple('OrderLevelInfo', 'price qty order_id broker')
PriceLevelInfo = namedtuple('PriceLevelInfo', 'price qty')
s_order_info = 'neutrino_order symbol side order_id price qty '
s_order_info += 'cum_qty status secondary_order_id time_in_force id is_alive '
s_order_info += 'is_pending'
OrderInfo = namedtuple('OrderInfo', s_order_info)
s_count_info = 'bid_count ask_count trade_count new_bid cancel_bid '
s_count_info += 'replace_bid new_ask cancel_ask replace_ask status_changed'
CountsInfo = namedtuple('CountsInfo', s_count_info)
def fill_orderinfo(order):
return OrderInfo(order, order.symbol, str(order.side), 'None', # order.orderId,
order.current.price, order.current.qty,
order.current.cumQty, str(order.current.status),
order.current.secondaryOrderID,
str(order.current.timeInForce),
order.userData['id'], order.isAlive(), order.isPending())
'''
End help functions
'''
class CandlesHandler(object):
'''
Candles Data Handler
'''
def __init__(self):
'''
Instantiate a CandlesHandler object
'''
self._data_holder = {}
self._alias = {}
self._key_to_alias = {}
self.b_should_load_again = False
def reset(self, this_candle=None, this_conf=None):
'''
Unsubscribe all data
'''
b_t = not isinstance(this_conf, type(None))
if isinstance(this_candle, type(None)):
for this_candle in self._data_holder.values():
for d_conf in this_candle.l_to_subscribe:
try:
fx.unsubscribeIndicator(str(d_conf['symbol']),
str(d_conf['what']),
str(d_conf['conf']),
this_candle.i_lastbgtime)
except IndexError:
pass
del this_candle
self._data_holder = {}
self._alias = {}
self._key_to_alias = {}
else:
for d_conf in this_candle.l_to_subscribe:
if str(d_conf['what']) != 'CANDLE' and b_t:
if isinstance(this_conf, dict):
if sum(
[(d_conf[s_key]!=this_conf[s_key]) * 1
for s_key in this_conf]):
continue
try:
fx.unsubscribeIndicator(str(d_conf['symbol']),
str(d_conf['what']),
str(d_conf['conf']),
this_candle.i_lastbgtime)
except IndexError:
pass
if not b_t:
self._data_holder.pop(this_candle.main_key)
self._alias.pop(self._key_to_alias[this_candle.main_key])
self._key_to_alias.pop(this_candle.main_key)
del this_candle
def check_pending_candles_subscriptions(self, fx_now):
if self.b_should_load_again:
self.b_should_load_again = False
self.on_symbols_load(fx_now)
def subscribe(self, s_symbol, interval, i_nbars, s_alias, s_type=None):
'''
Subscribe the data streaming of a new candle
:param s_symbol: string. symbol to be subscribed
:param interval: CandleIntervals object. The interval of the candle
:param i_nbars: integer. number of candles to recover
:param s_alias: string. An alias to the candle
:return: CandleData.
'''
self.b_should_load_again = True
if not isinstance(interval, int):
i_interval = interval.value
else:
i_interval = interval * 60
s_key = '{}:interval={:0.0f}'.format(s_symbol, i_interval)
self._alias[s_alias] = s_key
self._key_to_alias[s_key] = s_alias
if s_key not in self._data_holder:
self._data_holder[s_key] = CandleData(s_symbol, i_interval,
i_nbars)
self._data_holder[s_key].set_name(s_alias)
return self._data_holder[s_key]
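# Illustrative example of the key convention above: subscribe('PETR4', 5, 100,
# 'petr_5min') with an integer interval of 5 minutes stores the candle under the
# key 'PETR4:interval=300' and maps the alias 'petr_5min' to that key.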
def on_symbols_load(self, f_fxnow):
'''
Initiate the Candles attributes when the instrument is synchronized
:param f_fxnow: float. the current time, in seconds
'''
self.b_should_load_again = False
s_txt = None
for this_candle in iter(self._data_holder.values()):
if this_candle.v3_obj:
this_candle.v3_obj.b_ready = True
s_txt = this_candle.on_symbols_load(f_fxnow)
return s_txt
def add_indicator_to(self, this_candle, **kwargs):
'''
Add a new indicator to the candle passed
:param s_ta_name: string. Indicator name (ta-lib)
:param i_time_period: integer. rolling window length
:param s_input: string. the input used by SMA and EMA
:param s_alias: string. alias to the indicator
:return: CandleData object.
'''
self.b_should_load_again = True
return this_candle.add_indicator(**kwargs)
def get_candle(self, s_alias=None, s_symbol=None, i_interval=None):
'''
Return the candle data related to the alias or symbol/interval
:param s_symbol: string. symbol to be subscribed
:param i_interval: integer. The interval of the candle, in seconds
:return: CandleData object.
'''
if not s_symbol:
s_key = self._alias.get(s_alias, None)
# if not s_key:
# return None
return self._data_holder.get(s_key, None)
s_key = '{}:interval={:0.0f}'.format(s_symbol, i_interval)
return self._data_holder.get(s_key, None)
def get_current_value(self, this_candle, s_alias, i_iidx=0):
'''
Return the current value of the information required
:param this_candle: CandleData object. Candle to retrieve information from
:param s_alias: string. alias to the indicator desired
:return: float. The value of the information required
'''
return this_candle.get_value(s_alias, -1, i_inner_idx=i_iidx)
def get_previous_value(self, this_candle, s_alias, i_iidx=0):
'''
Return the previous value of the information required
:param this_candle: CandleData object. Candle to retrieve information from
:param s_alias: string. alias to the indicator desired
:return: float. The value of the information required
'''
return this_candle.get_value(s_alias, -2, i_inner_idx=i_iidx)
def get_value(self, this_candle, s_alias, i_idx, i_iidx=0):
'''
Return the value of the information required in the index passed
:param this_candle: CandleData object. Candle to retrieve information from
:param s_alias: string. alias to the indicator desired
:param i_idx: integer. position of the information in array
:return: float. The value of the information required
'''
return this_candle.get_value(s_alias, i_idx, i_inner_idx=i_iidx)
def get_all_values(self, this_candle, s_alias):
'''
Return all values of the information required
:param this_candle: CandleData object. Candle to retrieve information from
:param s_alias: string. alias to the indicator desired
:return: numpy array. The values of the information required
'''
l_basic = ['HIGH', 'LOW', 'CLOSE', 'OPEN', 'QTY', 'VOLUME', 'TS',
'QTY_BUYER', 'QTY_SELLER', 'CUMQTY_SELLER', 'CUMQTY_BUYER',
'CUMQTY']
s_info2 = ''
if s_alias not in l_basic:
s_name = this_candle._alias.get(s_alias, None)
if not s_name:
return
elif s_alias in l_basic:
s_name = 'CANDLE:{}'.format(this_candle.main_key)
s_info2 = 'CANDLE'
s_info = s_alias
d_data = this_candle.d_candle_data[s_name]
if s_info2 == 'CANDLE':
if len(d_data['data'][s_info]) > 0:
return np.array(d_data['data'][s_info])
else:
if len(d_data['data']) > 0:
return np.array(d_data['data'])
return None
def update(self, hist):
'''
Update data and check if the candle information is complete
:param raw_data: neutrino object.
:return: CandleData object.
'''
l_aux = str(hist.Indicator()).split(':')
d_conf = dict([x.split('=') for x in l_aux[2].split(';')])
s_name = l_aux[1]
# s_interval = l_aux[2].split(';')[0]
s_interval = d_conf['interval']
# s_key = s_name + ':' + s_interval
s_key = s_name + ':interval=' + s_interval
obj_aux = self._data_holder.get(s_key, None)
if isinstance(obj_aux, type(None)):
return None
return obj_aux.update(hist)
class CandleData(object):
'''
Candle Data representation
'''
def __init__(self, s_symbol, i_interval, i_nbars):
'''
Instantiate a CandleData object
:param symbol: string. symbol to be subscribed
:param resol: integer. The length of the candle, in seconds
:param window: integer. Number of candles to return after the first update
:param begin: integer. how long to recover, in seconds
'''
self.s_symbol = s_symbol
self.symbol_name = s_symbol
self.s_conf = 'interval={:0.0f}'.format(i_interval)
self.i_interval = i_interval
self.i_nbars = i_nbars
self.s_name = ''
s_key = 'CANDLE:{}:interval={:0.0f}'.format(s_symbol, i_interval)
self.main_key = s_key.replace('CANDLE:', '')
self.d_candle_data = {s_key: {'ID': 0, 'data': None}}
self._last_idx = 0
self.d_last_ts = {s_key: 0}
self.i_count = 0
self._alias = {}
self.l_to_subscribe = []
self.i_lastbgtime = 0
# control the last tradeID used
self.d_last_tradeid = {s_key: 0}
self.i_last_tradeid = -1
self.count = 1
self.data_updated = 0
self.v3_obj = None
self.l_to_subscribe.append({'symbol': self.s_symbol,
'what': "CANDLE",
'conf': self.s_conf})
def set_name(self, s_name):
'''
'''
self.s_name = s_name
def on_symbols_load(self, f_fxnow):
'''
Subscribe the indicators in the buffer
:param f_fxnow: float. The current time, in seconds
'''
s_txt, i_bgt = get_begin_time(f_fxnow, self.i_nbars, self.i_interval)
self.i_lastbgtime = i_bgt
for d_conf in self.l_to_subscribe:
fx.subscribeIndicator(str(d_conf['symbol']),
str(d_conf['what']),
str(d_conf['conf']),
i_bgt)
self.count = len(self.l_to_subscribe)
return s_txt
def get_value(self, s_alias, i_idx, s_info=None, f_timeperiod=None,
i_inner_idx=0):
'''
Return the desired information related to the candle. f_timeperiod and
s_info are required only if s_alias is None
:param s_alias: string. the alias to the information requested
:param i_idx: integer. the index of the information in values list
:param s_info: string. information requested
:param f_timeperiod: float. time period of the indicator
:param i_inner_idx: integer. inner index of the information desired
:return: float. The value of the information requested
'''
l_basic = ['HIGH', 'LOW', 'CLOSE', 'OPEN', 'QTY', 'VOLUME', 'TS',
'QTY_BUYER', 'QTY_SELLER', 'CUMQTY_SELLER', 'CUMQTY_BUYER',
'CUMQTY']
# if not len(self):
# return None
if s_info:
s_info2 = s_info
if s_info in l_basic:
s_info2 = 'CANDLE'
s_name = '{}:{}:'.format(s_info2, self.s_symbol) + self.s_conf
if f_timeperiod and s_info2 != 'CANDLE':
s_name += ';time_period={:0.0f}'.format(f_timeperiod)
else:
if s_alias not in l_basic:
s_info2 = s_alias
s_name = self._alias.get(s_alias, None)
if not s_name:
return
elif s_alias in l_basic:
s_info2 = 'CANDLE'
s_name = '{}:{}:'.format(s_info2, self.s_symbol) + self.s_conf
s_info = s_alias
if f_timeperiod and s_info2 != 'CANDLE':
s_name += ';time_period={:0.0f}'.format(f_timeperiod)
d_data = self.d_candle_data[s_name]
if s_info2 == 'CANDLE':
if not d_data['data']:
return None
if not d_data['data'][s_info]:
return None
if len(d_data['data'][s_info]) >= abs(i_idx):
return d_data['data'][s_info][i_idx]
else:
if d_data['data'] and len(d_data['data']) >= abs(i_idx):
return d_data['data'][i_idx][i_inner_idx]
return None
def add_indicator(self, **kwargs):
'''
Add a new indicator to the candle
:param s_ta_name: string. Indicator name (ta-lib)
:param i_time_period: inter. rolling windows length
:param s_alias: string.
:return: Boolean. If it susbcribed new data
'''
# SMA:PETR4:interval=60;time_period=20
s_ta_name = kwargs.get('s_ta_name')
i_time_period = kwargs.get('i_time_period')
s_alias = kwargs.get('s_alias')
s_input = kwargs.get('s_input', None)
i_ma_period = kwargs.get('i_ma_period', None)
# define the basic conf
s_conf = "interval={:0.0f};time_period={:0.0f}"
s_conf = s_conf.format(self.i_interval, i_time_period)
# define input for TAs that accept these variables
if not s_input:
if s_ta_name in ['EMA', 'SMA', 'MOM',
<reponame>charlienew/GANCS<gh_stars>10-100
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
class Model:
"""A neural network model.
Currently only supports a feedforward architecture."""
def __init__(self, name, features):
self.name = name
self.outputs = [features]
def _get_layer_str(self, layer=None):
if layer is None:
layer = self.get_num_layers()
return '%s_L%03d' % (self.name, layer+1)
def _get_num_inputs(self):
return int(self.get_output().get_shape()[-1])
def _glorot_initializer(self, prev_units, num_units, stddev_factor=1.0):
"""Initialization in the style of Glorot 2010.
stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
stddev = np.sqrt(stddev_factor / np.sqrt(prev_units*num_units))
return tf.truncated_normal([prev_units, num_units],
mean=0.0, stddev=stddev)
def _glorot_initializer_conv2d(self, prev_units, num_units, mapsize, stddev_factor=1.0):
"""Initialization in the style of Glorot 2010.
stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
stddev = np.sqrt(stddev_factor / (np.sqrt(prev_units*num_units)*mapsize*mapsize))
return tf.truncated_normal([mapsize, mapsize, prev_units, num_units],
mean=0.0, stddev=stddev)
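# Worked example of the scaling above (illustrative numbers): for the dense case
# with prev_units = num_units = 64 and stddev_factor = 2.0 (ReLU), the stddev is
# sqrt(2.0 / sqrt(64 * 64)) = sqrt(2 / 64) ~= 0.177; the conv2d variant further
# divides by mapsize * mapsize inside the square root.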
def get_num_layers(self):
return len(self.outputs)
def add_batch_norm(self, scale=False):
"""Adds a batch normalization layer to this model.
See ArXiv 1502.03167v3 for details."""
# TBD: This appears to be very flaky, often raising InvalidArgumentError internally
with tf.variable_scope(self._get_layer_str()):
out = tf.contrib.layers.batch_norm(self.get_output(), scale=scale)
self.outputs.append(out)
return self
def add_flatten(self):
"""Transforms the output of this network to a 1D tensor"""
with tf.variable_scope(self._get_layer_str()):
batch_size = int(self.get_output().get_shape()[0])
out = tf.reshape(self.get_output(), [batch_size, -1])
self.outputs.append(out)
return self
def add_dense(self, num_units, stddev_factor=1.0):
"""Adds a dense linear layer to this model.
Uses Glorot 2010 initialization assuming linear activation."""
assert len(self.get_output().get_shape()) == 2, "Previous layer must be 2-dimensional (batch, channels)"
with tf.variable_scope(self._get_layer_str()):
prev_units = self._get_num_inputs()
# Weight term
initw = self._glorot_initializer(prev_units, num_units,
stddev_factor=stddev_factor)
weight = tf.get_variable('weight', initializer=initw)
# Bias term
initb = tf.constant(0.0, shape=[num_units])
bias = tf.get_variable('bias', initializer=initb)
# Output of this layer
out = tf.matmul(self.get_output(), weight) + bias
self.outputs.append(out)
return self
def add_sigmoid(self):
"""Adds a sigmoid (0,1) activation function layer to this model."""
with tf.variable_scope(self._get_layer_str()):
prev_units = self._get_num_inputs()
out = tf.nn.sigmoid(self.get_output())
self.outputs.append(out)
return self
def add_softmax(self):
"""Adds a softmax operation to this model"""
with tf.variable_scope(self._get_layer_str()):
this_input = tf.square(self.get_output())
reduction_indices = list(range(1, len(this_input.get_shape())))
acc = tf.reduce_sum(this_input, reduction_indices=reduction_indices, keep_dims=True)
out = this_input / (acc+FLAGS.epsilon)
#out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
self.outputs.append(out)
return self
def add_relu(self):
"""Adds a ReLU activation function to this model"""
with tf.variable_scope(self._get_layer_str()):
out = tf.nn.relu(self.get_output())
self.outputs.append(out)
return self
def add_elu(self):
"""Adds a ELU activation function to this model"""
with tf.variable_scope(self._get_layer_str()):
out = tf.nn.elu(self.get_output())
self.outputs.append(out)
return self
def add_lrelu(self, leak=.2):
"""Adds a leaky ReLU (LReLU) activation function to this model"""
with tf.variable_scope(self._get_layer_str()):
t1 = .5 * (1 + leak)
t2 = .5 * (1 - leak)
out = t1 * self.get_output() + \
t2 * tf.abs(self.get_output())
self.outputs.append(out)
return self
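# The identity used above: t1*x + t2*|x| with t1 = (1 + leak)/2 and
# t2 = (1 - leak)/2 equals max(x, leak*x). Quick check with leak = 0.2:
# x = 2 gives 0.6*2 + 0.4*2 = 2, and x = -1 gives 0.6*(-1) + 0.4*1 = -0.2 = leak*x.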
def add_conv2d(self, num_units, mapsize=1, stride=1, stddev_factor=1.0):
"""Adds a 2D convolutional layer."""
assert len(self.get_output().get_shape()) == 4, "Previous layer must be 4-dimensional (batch, width, height, channels)"
with tf.variable_scope(self._get_layer_str()):
prev_units = self._get_num_inputs()
# Weight term and convolution
initw = self._glorot_initializer_conv2d(prev_units, num_units,
mapsize,
stddev_factor=stddev_factor)
weight = tf.get_variable('weight', initializer=initw)
out = tf.nn.conv2d(self.get_output(), weight,
strides=[1, stride, stride, 1],
padding='SAME')
# Bias term
initb = tf.constant(0.0, shape=[num_units])
bias = tf.get_variable('bias', initializer=initb)
out = tf.nn.bias_add(out, bias)
self.outputs.append(out)
return self
def add_conv2d_transpose(self, num_units, mapsize=1, stride=1, stddev_factor=1.0):
"""Adds a transposed 2D convolutional layer"""
assert len(self.get_output().get_shape()) == 4, "Previous layer must be 4-dimensional (batch, width, height, channels)"
with tf.variable_scope(self._get_layer_str()):
prev_units = self._get_num_inputs()
# Weight term and convolution
initw = self._glorot_initializer_conv2d(prev_units, num_units,
mapsize,
stddev_factor=stddev_factor)
weight = tf.get_variable('weight', initializer=initw)
weight = tf.transpose(weight, perm=[0, 1, 3, 2])
prev_output = self.get_output()
output_shape = [FLAGS.batch_size,
int(prev_output.get_shape()[1]) * stride,
int(prev_output.get_shape()[2]) * stride,
num_units]
out = tf.nn.conv2d_transpose(self.get_output(), weight,
output_shape=output_shape,
strides=[1, stride, stride, 1],
padding='SAME')
# Bias term
initb = tf.constant(0.0, shape=[num_units])
bias = tf.get_variable('bias', initializer=initb)
out = tf.nn.bias_add(out, bias)
self.outputs.append(out)
return self
def add_residual_block(self, num_units, mapsize=3, num_layers=2, stddev_factor=1e-3):
"""Adds a residual block as per Arxiv 1512.03385, Figure 3"""
assert len(self.get_output().get_shape()) == 4, "Previous layer must be 4-dimensional (batch, width, height, channels)"
# Add projection in series if needed prior to shortcut
if num_units != int(self.get_output().get_shape()[3]):
self.add_conv2d(num_units, mapsize=1, stride=1, stddev_factor=1.)
bypass = self.get_output()
# Residual block
for _ in range(num_layers):
self.add_batch_norm()
self.add_relu()
self.add_conv2d(num_units, mapsize=mapsize, stride=1, stddev_factor=stddev_factor)
self.add_sum(bypass)
return self
def add_bottleneck_residual_block(self, num_units, mapsize=3, stride=1, transpose=False):
"""Adds a bottleneck residual block as per Arxiv 1512.03385, Figure 3"""
assert len(self.get_output().get_shape()) == 4, "Previous layer must be 4-dimensional (batch, width, height, channels)"
# Add projection in series if needed prior to shortcut
if num_units != int(self.get_output().get_shape()[3]) or stride != 1:
ms = 1 if stride == 1 else mapsize
#bypass.add_batch_norm() # TBD: Needed?
if transpose:
self.add_conv2d_transpose(num_units, mapsize=ms, stride=stride, stddev_factor=1.)
else:
self.add_conv2d(num_units, mapsize=ms, stride=stride, stddev_factor=1.)
bypass = self.get_output()
# Bottleneck residual block
self.add_batch_norm()
self.add_relu()
self.add_conv2d(num_units//4, mapsize=1, stride=1, stddev_factor=2.)
self.add_batch_norm()
self.add_relu()
if transpose:
self.add_conv2d_transpose(num_units//4,
mapsize=mapsize,
stride=1,
stddev_factor=2.)
else:
self.add_conv2d(num_units//4,
mapsize=mapsize,
stride=1,
stddev_factor=2.)
self.add_batch_norm()
self.add_relu()
self.add_conv2d(num_units, mapsize=1, stride=1, stddev_factor=2.)
self.add_sum(bypass)
return self
def add_sum(self, term):
"""Adds a layer that sums the top layer with the given term"""
with tf.variable_scope(self._get_layer_str()):
prev_shape = self.get_output().get_shape()
term_shape = term.get_shape()
#print("%s %s" % (prev_shape, term_shape))
assert prev_shape == term_shape, "Can't sum terms with a different size"
out = tf.add(self.get_output(), term)
self.outputs.append(out)
return self
def add_mean(self):
"""Adds a layer that averages the inputs from the previous layer"""
with tf.variable_scope(self._get_layer_str()):
prev_shape = self.get_output().get_shape()
reduction_indices = list(range(len(prev_shape)))
assert len(reduction_indices) > 2, "Can't average a (batch, activation) tensor"
reduction_indices = reduction_indices[1:-1]
out = tf.reduce_mean(self.get_output(), reduction_indices=reduction_indices)
self.outputs.append(out)
return self
def add_upscale(self, size=None):
"""Adds a layer that upscales the output by 2x through nearest neighbor interpolation"""
prev_shape = self.get_output().get_shape()
if size is None:
size = [2 * int(s) for s in prev_shape[1:3]]
out = tf.image.resize_nearest_neighbor(self.get_output(), size)
self.outputs.append(out)
return self
def add_concat(self, layer_add):
last_layer = self.get_output()
prev_shape = last_layer.get_shape()
try:
out = tf.concat(axis = 3, values = [last_layer, layer_add])
self.outputs.append(out)
except:
print('fail to concat {0} and {1}'.format(last_layer, layer_add))
return self
def add_layer(self, layer_add):
self.outputs.append(layer_add)
return self
def get_output(self):
"""Returns the output from the topmost layer of the network"""
return self.outputs[-1]
def get_variable(self, layer, name):
"""Returns a variable given its layer and name.
The variable must already exist."""
scope = self._get_layer_str(layer)
collection = tf.get_collection(tf.GraphKeys.VARIABLES, scope=scope)
# TBD: Ugly!
for var in collection:
if var.name[:-2] == scope+'/'+name:
return var
return None
def get_all_layer_variables(self, layer):
"""Returns all variables in the given layer"""
scope = self._get_layer_str(layer)
return tf.get_collection(tf.GraphKeys.VARIABLES, scope=scope)
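# Hedged usage sketch (not part of the original file); names and shapes are
# illustrative only:
#
#     # features = tf.placeholder(tf.float32, [16, 64, 64, 2])
#     # model = Model('GEN', features)
#     # model.add_conv2d(64, mapsize=3, stride=1, stddev_factor=2.0)
#     # model.add_batch_norm()
#     # model.add_relu()
#     # output = model.get_output()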
def _discriminator_model(sess, features, disc_input, layer_output_skip=5, hybrid_disc=0):
# update 05092017, hybrid_disc consider whether to use hybrid space for discriminator
# to study the kspace distribution/smoothness properties
# Fully convolutional model
mapsize = 3
layers = [8, 16, 32, 64]#[64, 128, 256, 512]
old_vars = tf.global_variables()#tf.all_variables() , all_variables() are deprecated
# get discriminator input
disc_hybird = 2 * disc_input - 1
print(hybrid_disc, 'discriminator input dimensions: {0}'.format(disc_hybird.get_shape()))
model = Model('DIS', disc_hybird)
# discriminator network structure
for layer in range(len(layers)):
nunits = layers[layer]
stddev_factor = 2.0
model.add_conv2d(nunits, mapsize=mapsize, stride=2, stddev_factor=stddev_factor)
model.add_batch_norm()
model.add_relu()
# Finalization a la "all convolutional net"
model.add_conv2d(nunits, mapsize=mapsize, stride=1, stddev_factor=stddev_factor)
model.add_batch_norm()
model.add_relu()
model.add_conv2d(nunits, mapsize=1, stride=1, stddev_factor=stddev_factor)
model.add_batch_norm()
model.add_relu()
# Linearly map to real/fake and return average score
# (softmax will be applied later)
model.add_conv2d(1, mapsize=1, stride=1, stddev_factor=stddev_factor)
model.add_mean()
new_vars = tf.global_variables()#tf.all_variables() , all_variables() are deprecated
disc_vars = list(set(new_vars) - set(old_vars))
#select output
output_layers = [model.outputs[0]] + model.outputs[1:-1][::layer_output_skip] + [model.outputs[-1]]
return model.get_output(), disc_vars, output_layers
def conv(batch_input, out_channels, stride=2, size_kernel=4):
with tf.variable_scope("conv"):
in_channels = batch_input.get_shape()[3]
filter = tf.get_variable("filter", [size_kernel, size_kernel, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, in_channels, out_channels]
# => [batch, out_height, out_width, out_channels]
padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="CONSTANT")
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
return conv
def deconv(batch_input, out_channels, size_kernel=3):
with tf.variable_scope("deconv"):
batch, in_height, in_width, in_channels = [int(d) for d in batch_input.get_shape()]
filter = tf.get_variable("filter", [size_kernel, size_kernel, out_channels, in_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
# => [batch, out_height, out_width, out_channels]
conv = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
return conv
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them
<gh_stars>0
'''
Plot a variable (XYZ coord, ZTD, residuals) vs. time with TRACE file as input
Input:
trace_file - .TRACE file location from pea - str
output_dir - output_directory for plots - str
Plotting Options
-PPP - Plot XYZ Coords in PPP mode - str
-SPP - Plot XYZ Coords in SPP mode - str
-ZTD - Plot ZTD Estimates from PPP - str
-RES - Plot residuals of all satellites - str
-resid_sats - Plot residuals of list of satellites - str
in comma separated format,
e.g. G02,G05,G32,G25,G15
-resid_LC - Choose linear combination plotted - str
default: L1L2
options: L1L2, L1L5
-resid_codpha - Choose code or phase plotted - str
default: Code
options: Code, Phase, or Code,Phase
-resid_fit - Choose pre or post fit plotted - str
default: Postfit
options: Prefit, Postfit, or Prefit,Postfit
Output:
Time-series plot/s of the variable/s saved in output_dir
<NAME> (based on code developed by <NAME> at Geoscience Australia)
2020-12-03 15:34
'''
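# Hedged example invocation (illustrative only; the script/file name and the
# exact argument parsing are defined outside this excerpt and may differ):
#   python plot_trace.py station.TRACE ./plots -RES -resid_sats G02,G05 -resid_LC L1L2 -resid_codpha Code -resid_fit Postfit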
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from pathlib import Path
from numpy import loadtxt
import argparse
from datetime import datetime, timedelta
from gn_lib.gn_io.trace import _read_trace, _read_trace_res, _read_trace_LC, _read_trace_el
from gn_lib.gn_io.sinex import _read_snx_solution
from gn_lib.gn_io.trop import _read_tro_solution
from gn_lib.gn_const import J2000_ORIGIN
from gn_lib.gn_datetime import yydoysec2datetime as _yydoysec2datetime
from gn_lib.gn_datetime import j20002datetime as _j20002datetime
# Old trace file parsing function - still needed for SPP
#==============================================================================
def parseTRACEfile(tracefile): #, ds, satels):
# Final dict
output = {}
# Placeholder for the datetime of Epoch 1
epoch1_dt = 0
# Open the TRACE file:
with open(tracefile) as f:
# Run through line by line
for line in f:
# Removing any trailing space
line = line.rstrip()
if 'station : ' in line:
output['station'] = line.split(':')[1]
# Now search through for any time a new Epoch block starts and save State and Residual info
if '################# Starting Epoch' in line:
parts = line.split(' ')
# Get the Epoch number:
for part in parts:
try:
epoch = int(part)
except ValueError:
continue
line = f.readline()
line = line.rstrip()
# Before reaching the end of the Epoch block, save all the info:
while '- States' not in line:
line = f.readline()
# First, find the start datetime of the processing, i.e. Epoch 1
if (epoch1_dt == 0) & ('*1 PDE-CS GPST' in line):
parts = line.split(' ')
nums = []
for part in parts:
try:
nums.append(float(part))
except ValueError:
continue
# GPS-Time start point:
gps_st = datetime.strptime('1980 006','%Y %j')
# Add number of weeks and seconds to start point to get Epoch 1:
epoch1_dt = gps_st + timedelta(weeks = nums[0], seconds = nums[1])
# Save to output
output['Epoch1'] = epoch1_dt
# If spp info, record:
if '*3 sppos sol:' in line:
if 'sppPos' not in output:
output['sppPos'] = {}
output['sppPos']['X'] = []
output['sppPos']['Y'] = []
output['sppPos']['Z'] = []
parts = line.split(' ')
nums = []
for part in parts:
try:
nums.append(float(part))
except ValueError:
continue
output['sppPos']['X'].append(nums[0])
output['sppPos']['Y'].append(nums[1])
output['sppPos']['Z'].append(nums[2])
else:
pass
return output
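# Worked example of the GPS-time conversion in parseTRACEfile: GPS time starts on
# 1980 day-of-year 006 (6 January 1980), so a line reporting week 2086 with 0.0
# seconds-of-week gives epoch1_dt = 6 Jan 1980 + 2086 weeks = 29 December 2019
# 00:00:00 (GPS time scale; timedelta does not apply leap seconds).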
def calc_diff_lag(y, x, lag):
''' Calculate y[n+lag] - y[n] for all valid n, and output new_y (with x trimmed to match)
'''
new_y = y[lag:] - y[:-lag]
if len(x) == len(new_y):
return new_y,x
else:
x = x[:-lag]
return new_y,x
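# Small worked example for calc_diff_lag: with y = numpy.array([1, 3, 6, 10]) and
# lag = 1, new_y = y[1:] - y[:-1] = [2, 3, 4]; x is then shortened by one element
# so both arrays stay aligned.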
def resid_plot(
data,
station,
LC,
cod_or_ph,
fit,
sats,
st_epoch = 0,
en_epoch = None,
xlim1 = None,
xlim2 = None,
ymin = None,
ymax = None,
flex_st = None,
flex_en = None,
save_fig = False,
show = False,
diff_lag = 0):
"""
Plot of the Residuals for given inputs of Linear-Combination type,
code/phase, pre/post fit, and sats
Input
data - Results to be plotted (dict from parseTRACEfile) - dict
station - The station of interest - str
LC - Options being 'L1L2' and 'L1L5' - str
cod_or_ph - Code or Phase data (to plot) - str
fit - Type of residual to plot: either 'Postfit' or 'Prefit' - str
sat - List of satellites to plot - list of str
st_epoch - If plotting is to start at epoch other than Epoch 1 - int
OPTIONAL
If a portion of the plot is to be highlight to show a flex event, need
start and end epochs
flex_st - Epoch number where flex event starts - int
flex_en - Epoch number where flex event ends - int
If the figure is to be saved, set save_fig = True, this will save to pwd
Output
Zenith Tropospheric Delay Plot
"""
fig1,ax1 = plt.subplots(1,1, figsize = (12,6) )
# Get Epoch info:
epochs = data.index.get_level_values(1).values
epoch1 = J2000_ORIGIN + np.timedelta64(epochs[st_epoch],'s')
if en_epoch == None:
epochN = J2000_ORIGIN + np.timedelta64(epochs[-1],'s')
else:
epochN = J2000_ORIGIN + np.timedelta64(epochs[en_epoch],'s')
date_str = epoch1.astype(datetime).strftime("%Y-DOY-%j")+'-'+epochN.astype(datetime).strftime("%j")
#epoch1 = data['Epoch1']
alph = 0.95
for sat in sats:
sat_mask = (data['PRN'] == sat)
if cod_or_ph == 'Code':
cp = 'P'
elif cod_or_ph == 'Phase':
cp = 'L'
if LC == 'L1L2':
cp += '12'
elif LC == 'L1L5':
cp += '15'
cp_mask = (data.index.get_level_values(0) == cp)
data2plot = data[sat_mask & cp_mask]
y = np.array(data2plot[fit.upper()])
data2plot = data2plot.reset_index(level=[0,1])
x = np.array(data2plot['level_1'])
x_times = J2000_ORIGIN + np.array(x,dtype='timedelta64[s]')
if diff_lag:
y,x_times = calc_diff_lag(y,x_times,diff_lag)
ax1.plot(x_times[st_epoch:en_epoch], y[st_epoch:en_epoch], label=sat, alpha=alph)
alph *= 0.88
# If indices are given for the start and end of a flex event, highlight that part
if flex_st:
ax1.axvspan(flex_st, flex_en, alpha=0.1, color='red')
ax1.set_ylim(ymin=ymin,ymax=ymax)
ax1.set_ylabel('Residual (m)')
# Formating x-axis
myFmt = mdates.DateFormatter('%b-%d %H:%M')
ax1.xaxis.set_major_formatter(myFmt)
fig1.autofmt_xdate()
#ax1.set_xlabel(f'Epoch Number (Epoch 1: {epoch1.strftime("%Y-DOY-%j %H:%M:%S")})')
ax1.grid(True)
ax1.legend()
if xlim1:
ax1.set_xlim(xmin=xlim1, xmax=xlim2)
ax1.title.set_text(f'{station} - {LC} - {cod_or_ph} {fit} Residual Plot - {date_str} Epoch {xlim1}-{xlim2}')
if save_fig:
f_save = f'{save_fig}{date_str}_{station}-{LC}-_{cod_or_ph}_{fit}_Residuals__Epoch-{xlim1}-{xlim2}.png'
fig1.savefig(f_save)
print(f'Saved: {f_save}')
elif st_epoch !=0:
ax1.title.set_text(f'{station} - {LC} - {cod_or_ph} {fit} Residual Plot - {date_str} Epoch {st_epoch}-end')
if save_fig:
f_save = f'{save_fig}{date_str}_{station}-{LC}-_{cod_or_ph}_{fit}_Residuals_St_Epoch-{st_epoch}.png'
fig1.savefig(f_save)
print(f'Saved: {f_save}')
else:
ax1.title.set_text(f'{station} - {LC} - {cod_or_ph} {fit} Residual Plot - {date_str}')
if save_fig:
f_save = f'{save_fig}{date_str}_{station}-{LC}-_{cod_or_ph}_{fit}_Residuals.png'
fig1.savefig(f_save)
print(f'Saved: {f_save}')
if show:
fig1.show()
return fig1,ax1
def pos_plot(
data,
station,
st_epoch = 0,
en_epoch = None,
flex_st = None,
flex_en = None,
save_fig = False,
show = False,
diff_lag = 0):
"""
Coordinate (X,Y,Z) plot using the data from the parseTRACEfile function and named station.
This is a 3 panel plot, each running horizontally and in X, Y, Z order from top to bottom
Input
data - Results to be plotted (dict from parseTRACEfile) - dict
station - The station of interest - str
st_epoch - If plotting is to start at some epoch other than Epoch 1 (optional) - int
OPTIONAL
If a portion of the plot is to be highlighted to show a flex event, need
flex_st - Epoch number where flex event starts - int
flex_en - Epoch number where flex event ends - int
If the figure is to be saved, set save_fig = True, this will save to pwd
Output
PPP POS Plot
"""
# Get Epoch info:
epochs = data.index.get_level_values(1).values
epoch1 = J2000_ORIGIN + np.timedelta64(epochs[st_epoch],'s')
date_str = epoch1.astype(datetime).strftime("%Y-DOY-%j")
# Set up figure
fig2,ax2 = plt.subplots(3,1, figsize = (18,14) )
# Get y, x and error ues for the X coord plot
yX = data.EST.X.values[st_epoch:en_epoch]
yerrX = np.sqrt(data.VAR.X.values[st_epoch:en_epoch])
x = epochs[st_epoch:en_epoch]
x_times = J2000_ORIGIN + np.array(x,dtype='timedelta64[s]')
# Plot the x and y values for the top most subplot, including error range
if diff_lag:
yX,x_times = calc_diff_lag(yX,x_times,diff_lag)
ax2[0].plot(x_times,yX)
ax2[0].fill_between(x_times, yX-yerrX, yX+yerrX,alpha=0.2)
# If indices are given for the start and end of a flex event, highlight that part
if flex_st:
ax2[0].axvspan(flex_st, flex_en, alpha=0.2, color='red')
# Label the subplot
ax2[0].set_ylabel('X Coordinate (m)')
ax2[0].tick_params(labelbottom=False)
ax2[0].title.set_text('X')
ax2[0].grid(True)
# Get y and error values for the Y coord plot
yY = data.EST.Y.values[st_epoch:en_epoch]
yerrY = np.sqrt(data.VAR.Y.values[st_epoch:en_epoch])
# Plot the x and y values for the middle subplot, including error range
if diff_lag:
yY,x_times = calc_diff_lag(yY,x_times,diff_lag)
ax2[1].plot(x_times,yY)
ax2[1].fill_between(x_times, yY-yerrY, yY+yerrY,alpha=0.2)
# If indices are given for the start and end of a flex event, highlight that part
<reponame>MinervaVilchez/tytus<filename>parser/team25/code/astFunciones.py
from astExpresion import Expresion, ExpresionCadena, ExpresionID, ExpresionNumero, TIPO_DE_DATO
from reporteErrores.errorReport import ErrorReport  # errors are appended to the list only in the outermost scope; these sub-classes just propagate the error upward
import math
import hashlib
import random
from datetime import datetime
class FuncionNumerica(Expresion):
def __init__(self, funcion, parametro1=None, parametro2=None, linea = 0):
self.parametro1 = parametro1
self.parametro2 = parametro2
self.funcion = funcion.upper()
self.linea = linea
def dibujar(self):
identificador = str(hash(self))
nodo = "\n" + identificador + "[ label =\"" + self.funcion + "\" ];"
# Retorno
if self.parametro1:
nodo += "\n" + identificador + " -> " + \
str(hash(self.parametro1)) + ";"
nodo += self.parametro1.dibujar()
if self.parametro2:
nodo += "\n" + identificador + " -> " + \
str(hash(self.parametro2)) + ";"
nodo += self.parametro2.dibujar()
return nodo
def getTipo(self , valorNumerico):
if (valorNumerico % 1) == 0:
return TIPO_DE_DATO.ENTERO
else:
return TIPO_DE_DATO.DECIMAL
def metodo_width_bucket(self , lista_numeros , ts):
if len(lista_numeros) == 4:
# EXPRESSION, MIN_VALUE, MAX_VALUE, N_BUCKETS
valorPrueba = lista_numeros[0].ejecutar(ts)
if isinstance(valorPrueba , ErrorReport):
return valorPrueba
rangoInicial = lista_numeros[1].ejecutar(ts)
if isinstance(rangoInicial , ErrorReport):
return rangoInicial
rangoMax = lista_numeros[2].ejecutar(ts)
if isinstance(rangoMax , ErrorReport):
return rangoMax
nBloques = lista_numeros[3].ejecutar(ts)
if isinstance(nBloques , ErrorReport):
return nBloques
# numeric type validation
if not isinstance(valorPrueba , ExpresionNumero):
return ErrorReport('sintactico', 'Error solo se acepta un tipo numero' ,self.linea)
if not isinstance(rangoInicial, ExpresionNumero):
return ErrorReport('sintactico', 'Error solo se acepta un tipo numero' ,self.linea)
if not isinstance(rangoMax, ExpresionNumero):
return ErrorReport('sintactico', 'Error solo se acepta un tipo numero' ,self.linea)
if not isinstance(nBloques, ExpresionNumero):
return ErrorReport('sintactico', 'Error solo se acepta un tipo numero' ,self.linea)
if not self.getTipo(nBloques.val) == TIPO_DE_DATO.ENTERO:
return ErrorReport('sintactico', 'Error solo se acepta un ENTERO en el ultimo parametro' ,self.linea)
# unwrap the raw numeric values
valorPrueba = valorPrueba.val
rangoInicial = rangoInicial.val
rangoMax = rangoMax.val
nBloques = nBloques.val
if valorPrueba >= rangoMax:
return ExpresionNumero(1 + nBloques,TIPO_DE_DATO.ENTERO,self.linea)
elif valorPrueba < rangoInicial:
return ExpresionNumero( (1-1)*0 , TIPO_DE_DATO.ENTERO,self.linea)
else:
diferencia = rangoMax-rangoInicial
subIntervalos = diferencia/nBloques # _ , _ , _ , _
auxUbicacion = 1
aux = rangoInicial + subIntervalos
while(not valorPrueba < aux):
#print(str(aux - subIntervalos) +' , ' + str(aux))
aux += subIntervalos
auxUbicacion +=1
return ExpresionNumero(auxUbicacion, TIPO_DE_DATO.ENTERO,self.linea)
else:
return ErrorReport('sintactico', 'error en width_bucket se esperaba solo 4 parametros' ,self.linea)
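# Worked example of the bucketing above: WIDTH_BUCKET(35, 0, 100, 5) uses a
# sub-interval of (100 - 0) / 5 = 20, so the buckets are [0,20), [20,40), ... and
# 35 falls in bucket 2; values below the low bound return 0 and values at or
# above the high bound return n_buckets + 1 = 6 (PostgreSQL-style behaviour).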
def ejecutar(self, ts = None):
if self.parametro2 != None: # 2 PARAMETROS
nodoSyn1 = self.parametro1.ejecutar(ts)
if isinstance(nodoSyn1 , ErrorReport):
return nodoSyn1
nodoSyn2 = self.parametro2.ejecutar(ts)
if isinstance(nodoSyn2 , ErrorReport):
return nodoSyn2
if isinstance(nodoSyn1 , ExpresionNumero) and isinstance(nodoSyn2 , ExpresionNumero):
if self.funcion == "ATAN2":
rads = math.atan2(nodoSyn1.val,nodoSyn2.val)
return ExpresionNumero(rads,self.getTipo(rads),self.linea)
if self.funcion == "ATAN2D":
rads = math.atan2(nodoSyn1.val,nodoSyn2.val)
grados = (rads * 180/math.pi)
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
if self.funcion == "DIV":
izq = nodoSyn1.val
der = nodoSyn2.val
if der == 0:
return ErrorReport('semantico', 'error DIVISION entre 0' ,self.linea)
valor = izq/der
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="GCD":
valor = math.gcd(nodoSyn1.val , nodoSyn2.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="MOD":
try:
valor = (nodoSyn1.val % nodoSyn2.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except:
return ErrorReport('semantico', 'error en MODULO' ,self.linea)
if self.funcion =="POWER":
try:
valor = (nodoSyn1.val ** nodoSyn2.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except:
return ErrorReport('semantico', 'error en MODULO' ,self.linea)
if self.funcion =="ROUND":
valor = round(nodoSyn1.val , nodoSyn2.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "TRUNC":
if self.getTipo(nodoSyn2.val) != TIPO_DE_DATO.ENTERO:
return ErrorReport('semantico', 'error en Metodo TRUNC el segundo parametro tiene que ser un entero' ,self.linea)
cadenaInt = str(nodoSyn1.val)
numero_truncado = ''
indice = 0
decimalesAdjuntados = 0
for i in range(len(cadenaInt)):
if cadenaInt[i] == '.':
numero_truncado += cadenaInt[i]
indice = i + 1
break
else:
numero_truncado += cadenaInt[i]
indice+=1
while(decimalesAdjuntados < nodoSyn2.val):
if indice < len(cadenaInt):
numero_truncado += cadenaInt[indice]
else:
numero_truncado += '0'
indice+=1
decimalesAdjuntados+=1
valor = float(numero_truncado)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
else:
return ErrorReport('semantico', 'error de tipo, se esperaba un ENTERO O DECIMAL' ,self.linea)
elif self.parametro1 != None: # 1 PARAMETRO
if isinstance(self.parametro1,list):
if self.funcion =="WIDTH_BUCKET":
return self.metodo_width_bucket(self.parametro1,ts)
nodoSyn1 = self.parametro1.ejecutar(ts)
if isinstance(nodoSyn1 , ErrorReport):
return nodoSyn1
if isinstance(nodoSyn1 , ExpresionNumero): # decimales y eneteros
if self.funcion == "ACOS": # RADIANEES
if nodoSyn1.val > 1 or nodoSyn1.val < -1:
return ErrorReport('semantico', 'Error en ACOS ,el parametro debe estar entre -1 y 1' ,self.linea)
return ExpresionNumero(math.acos(nodoSyn1.val),self.getTipo(nodoSyn1.val),self.linea)
if self.funcion == "ACOSD": # GRADOS
if nodoSyn1.val > 1 or nodoSyn1.val < -1:
return ErrorReport('semantico', 'Error en ACOSD , el parametro debe estar entre -1 y 1' ,self.linea)
rads = math.acos(nodoSyn1.val)
grados = (rads * 180/math.pi)
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
if self.funcion == "ASIN":
if nodoSyn1.val > 1 or nodoSyn1.val < -1:
return ErrorReport('semantico', 'Error en ASIN ,el parametro debe estar entre -1 y 1' ,self.linea)
valor = math.asin(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "ASIND":
if nodoSyn1.val > 1 or nodoSyn1.val < -1:
return ErrorReport('semantico', 'Error en ASIND ,el parametro debe estar entre -1 y 1' ,self.linea)
rads = math.asin(nodoSyn1.val)
grados = (rads * 180/math.pi)
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
if self.funcion == "ATAN":
try:
rads = math.atan(nodoSyn1.val)
return ExpresionNumero(rads,self.getTipo(rads),self.linea)
except:
return ErrorReport('semantico', 'Error en ATAN por el valor del parametro ' ,self.linea)
if self.funcion == "ATAND":
try:
rads = math.atan(nodoSyn1.val)
grados = (rads * 180/math.pi)
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
except:
return ErrorReport('semantico', 'Error en ATAND por el valor del parametro ' ,self.linea)
if self.funcion == "COS":
valor = math.cos(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "COSD":
rads = math.cos(nodoSyn1.val)
grados = (rads * 180/math.pi)
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
if self.funcion == "COT":
tangente=math.tan(nodoSyn1.val)
if tangente == 0:
return ErrorReport('semantico', 'Error en COT por el valor del parametro ' ,self.linea)
cot = 1 / tangente
return ExpresionNumero(cot,self.getTipo(cot),self.linea)
if self.funcion == "COTD":
tangente=math.tan(nodoSyn1.val)
if tangente == 0:
return ErrorReport('semantico', 'Error en COTD por el valor del parametro ' ,self.linea)
cot =math.degrees(1 / tangente)
return ExpresionNumero(cot,self.getTipo(cot),self.linea)
if self.funcion == "SIN":
radianes=math.sin(nodoSyn1.val)
return ExpresionNumero(radianes,self.getTipo(radianes),self.linea)
if self.funcion == "SIND":
grados=math.degrees(math.sin(nodoSyn1.val))
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
if self.funcion == "TAN":
try:
radianes=math.tan(nodoSyn1.val)
return ExpresionNumero(radianes,self.getTipo(radianes),self.linea)
except:
return ErrorReport('semantico', 'Error en TAN por el valor del parametro ' ,self.linea)
if self.funcion == "TAND":
try:
grados=math.degrees(math.tan(nodoSyn1.val))
return ExpresionNumero(grados,self.getTipo(grados),self.linea)
except:
return ErrorReport('semantico', 'Error en TAND por el valor del parametro ' ,self.linea)
if self.funcion == "COSH":
try:
valor=math.cosh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except:
return ErrorReport('semantico', 'Error en COSH por el valor del parametro ' ,self.linea)
if self.funcion == "SINH":
try:
valor=math.sinh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except:
return ErrorReport('semantico', 'Error en SINH por el valor del parametro ' ,self.linea)
if self.funcion == "TANH":
try:
valor=math.tanh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except:
return ErrorReport('semantico', 'Error en TANH por el valor del parametro ' ,self.linea)
if self.funcion == "ACOSH":
if nodoSyn1.val < 1:
return ErrorReport('semantico', 'Error en ACOSH, el parametro debe de ser mayor o igual a 1 ' ,self.linea)
valor=math.acosh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "ASINH":
valor=math.asinh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "ATANH":
if nodoSyn1.val > 0.99 or nodoSyn1.val < -0.99:
return ErrorReport('semantico', 'Error en ATANH, el parametro debe estar entre 0.99 y -0.99 ' ,self.linea)
valor=math.atanh(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
#_________________________________ end of trigonometric functions
if self.funcion == "ABS":
valor=math.fabs(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion == "CBRT": #RAIZ CUBICA SOLO PARA ENTREROS
if (nodoSyn1.val % 1) == 0:
valor = raizCubica(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
else:
return ErrorReport('semantico', 'error CBRT solo recibe enteros, NO decimales' ,self.linea)
if self.funcion =="CEIL" or self.funcion == "CEILING":
valor = math.ceil(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="DEGREES":
valor = math.degrees(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="FACTORIAL":
valor = math.factorial(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="FLOOR":# POR SI VIENE EN UN INSERT
valor = math.floor(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
if self.funcion =="LN":
try:
valor = math.log(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except :
return ErrorReport('semantico', 'error en el paramtro de LN' ,self.linea)
if self.funcion =="LOG":
try:
valor = math.log10(nodoSyn1.val)
return ExpresionNumero(valor,self.getTipo(valor),self.linea)
except :
return ErrorReport('semantico', 'error en el paramtro de LOG' ,self.linea)
if self.funcion =="EXP":
try:
valor =
# -*- coding: utf-8 -*-
import os
import json
import ast
import requests
import configparser
from easydict import EasyDict as edict
from datetime import datetime
from archives_tools.dacs import iso2DACS
import uuid
#funtions for debugging
def pp(output):
try:
print (json.dumps(output, indent=2))
except:
import ast
print (json.dumps(ast.literal_eval(str(output)), indent=2))
def serializeOutput(filePath, output):
f = open(filePath, "w")
try:
f.write(json.dumps(output, indent=2))
except:
f.write(json.dumps(ast.literal_eval(str(output)), indent=2))
f.close()
def fields(object):
for key in object.keys():
print (key)
#error handler
def checkError(response):
if not response.status_code == 200:
print ("ERROR: HTTP Response " + str(response.status_code))
try:
pp(response.json())
log = open("aspace.log", "a")
log.write("\n" + str(datetime.now()) + " -- " + "ERROR: HTTP Response " + str(response.status_code) + "\n" + json.dumps(response.json(), indent=2))
log.close()
except:
print (response.status_code)
log = open("aspace.log", "a")
log.write("\n" + str(datetime.now()) + " -- " + "ERROR: HTTP Response " + str(response.status_code))
log.close()
#reads config file for lower functions
def readConfig():
#load config file from user directory
if os.name == "nt":
configPath = os.path.join(os.getenv("APPDATA"), ".aspaceLibrary")
else:
configPath = os.path.join(os.path.expanduser("~"), ".aspaceLibrary")
if not os.path.isdir(configPath):
os.makedirs(configPath)
configFile = os.path.join(configPath, "local_settings.cfg")
config = configparser.ConfigParser()
config.read(configFile)
return config
#writes the config file back
def writeConfig(config):
#load config file from user directory
if os.name == "nt":
configPath = os.path.join(os.getenv("APPDATA"), ".aspaceLibrary")
else:
configPath = os.path.join(os.path.expanduser("~"), ".aspaceLibrary")
if not os.path.isdir(configPath):
os.makedirs(configPath)
configFile = os.path.join(configPath, "local_settings.cfg")
with open(configFile, 'w') as f:
config.write(f)
#basic function to get ASpace login details from a config file
def getLogin(aspaceLogin = None):
if aspaceLogin is None:
try:
config = readConfig()
#make tuple with basic ASpace login info
aspaceLogin = (config.get('ArchivesSpace', 'baseURL'), config.get('ArchivesSpace', 'user'), config.get('ArchivesSpace', 'password'))
except:
raise ValueError("ERROR: No config file present. Enter credentials with setURL(), setPassword(), or use a tuple, like: session = AS.getSession(\"http://localhost:8089\", \"admin\", \"admin\")")
return aspaceLogin
else:
return aspaceLogin
#function to update the URL in the config file
def setURL(URL):
config = readConfig()
if not config.has_section("ArchivesSpace"):
config.add_section('ArchivesSpace')
config.set('ArchivesSpace', 'baseURL', URL)
writeConfig(config)
print ("URL path updated")
#function to update the user in the config file
def setUser(user):
config = readConfig()
if not config.has_section("ArchivesSpace"):
config.add_section('ArchivesSpace')
config.set('ArchivesSpace', 'user', user)
writeConfig(config)
print ("User updated")
#function to update the URL in the config file
def setPassword(password):
config = readConfig()
if not config.has_section("ArchivesSpace"):
config.add_section('ArchivesSpace')
config.set('ArchivesSpace', 'password', password)
writeConfig(config)
print ("Password updated")
#function to get an ArchivesSpace session
def getSession(aspaceLogin = None):
#get tuple of login details if not provided with one
aspaceLogin = getLogin(aspaceLogin)
#inital request for session
r = requests.post(aspaceLogin[0] + "/users/" + aspaceLogin[1] + "/login", data = {"password":aspaceLogin[2]})
if r.status_code == 403:
print ("ASpace Connection Failed. Response 403, invalid credentials. Please check credentials in local_settings.cfg")
elif r.status_code != 200:
print ("ASpace Connection Failed. Response " + str(r.status_code) + ". Please check settings in local_settings.cfg")
else:
checkError(r)
print ("ASpace Connection Successful")
sessionID = r.json()["session"]
session = {'X-ArchivesSpace-Session':sessionID}
return session
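# Illustrative usage sketch (not part of the original module): obtain a session
# once and reuse it for later calls. The URL and credentials below are
# placeholder values for a local test instance, not real settings.
def _exampleGetSession():
    session = getSession(("http://localhost:8089", "admin", "admin"))
    repositories = getRepositories(session)
    return session, repositories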
def makeObject(jsonData):
#handles paginated returns
if "results" in jsonData:
jsonData = jsonData["results"]
if isinstance(jsonData, list):
itemList = []
#checks if list of json objects or just a single one
for thing in jsonData:
object = edict(thing)
#object.fields = fields(thing)
#object.json = thing
itemList.append(object)
return itemList
else:
#single json object
object = edict(jsonData)
#object.fields = fields(jsonData)
#object.json = jsonData
return object
################################################################
#GETTING LIST OF LARGE SETS: ACCESSIONS, RESOURCES, etc.
################################################################
def getResourceList(session, repo, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
resourceData= requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/resources?all_ids=true", headers=session)
checkError(resourceData)
return resourceData.json()
#get a list of accession numbers
def getAccessionList(session, repo, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
accessionData= requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/accessions?all_ids=true", headers=session)
checkError(accessionData)
return accessionData.json()
#get a list of subjects
def getSubjectList(session, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
subjectData= requests.get(aspaceLogin[0] + "/subjects?all_ids=true", headers=session)
checkError(subjectData)
return subjectData.json()
#get a list of top containers
def getContainerList(session, repo, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
containerData= requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/top_containers?all_ids=true", headers=session)
    checkError(containerData)
return containerData.json()
#get a list of locations
def getLocationList(session, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
locationData= requests.get(aspaceLogin[0] + "/locations?all_ids=true", headers=session)
checkError(locationData)
return locationData.json()
#get a list of digital objects
def getDAOList(session, repo, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
daoData= requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/digital_objects?all_ids=true", headers=session)
checkError(daoData)
return daoData.json()
################################################################
#REQUEST FUNCTIONS
################################################################
def singleRequest(session, repo, number, requestType, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
requestData = requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/" + requestType + "/" + str(number), headers=session)
checkError(requestData)
returnList = makeObject(requestData.json())
return returnList
def multipleRequest(session, repo, param, requestType, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
    #get list of all resources and loop through them
if param.lower().strip() == "all":
if requestType.lower() == "resources":
numberSet = getResourceList(session, repo, aspaceLogin)
elif requestType.lower() == "accessions":
numberSet = getAccessionList(session, repo, aspaceLogin)
elif requestType.lower() == "subjects":
numberSet = getSubjectList(session, aspaceLogin)
elif requestType.lower() == "top_containers":
numberSet = getContainerList(session, repo, aspaceLogin)
elif requestType.lower() == "locations":
numberSet = getLocationList(session, aspaceLogin)
elif requestType.lower() == "digital_objects":
numberSet = getDAOList(session, repo, aspaceLogin)
returnList = []
for number in numberSet:
if requestType.lower() == "subjects" or requestType.lower() == "locations":
requestData = requests.get(aspaceLogin[0] + "/" + requestType + "/" + str(number), headers=session)
else:
requestData = requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/" + requestType + "/" + str(number), headers=session)
checkError(requestData)
asObject = makeObject(requestData.json())
returnList.append(asObject)
return returnList
else:
if "-" in param:
range = int(param.split("-")[1]) - int(param.split("-")[0])
page = int(param.split("-")[0]) / range
limiter = "page=" + str(page + 1) + "&page_size=" + str(range)
elif "," in param:
limiter = "id_set=" + param.replace(" ", "")
else:
print ("Invalid parameter, requires 'all', set (53, 75, 120), or paginated (1-100")
if requestType.lower() == "subjects":
requestData= requests.get(aspaceLogin[0] + "/" + requestType + "?" + limiter, headers=session)
else:
requestData= requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/" + requestType + "?" + limiter, headers=session)
checkError(requestData)
returnList = makeObject(requestData.json())
return returnList
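# Illustrative usage sketch (not part of the original module): the three
# accepted forms of the param argument to multipleRequest(). The repository
# number 2 is a placeholder example.
def _exampleMultipleRequest(session):
    allResources = multipleRequest(session, 2, "all", "resources")
    firstHundred = multipleRequest(session, 2, "1-100", "resources")
    chosenSet = multipleRequest(session, 2, "53, 75, 120", "resources")
    return allResources, firstHundred, chosenSet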
def postObject(session, object, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
uri = object.uri
try:
objectString = json.dumps(object)
except:
import ast
objectString = json.dumps(ast.literal_eval(str(object)))
postData = requests.post(aspaceLogin[0] + str(uri), data=objectString, headers=session)
checkError(postData)
return postData.status_code
def deleteObject(session, object, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
if "uri" in object.keys():
uri = object.uri
elif "record_uri" in object.keys():
uri = object.record_uri
else:
print ("ERROR: Could not find uri for record")
deleteRequest = requests.delete(aspaceLogin[0] + str(uri), headers=session)
checkError(deleteRequest)
return deleteRequest.status_code
################################################################
#REPOSITORIES
################################################################
def getRepositories(session, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
repoData = requests.get(aspaceLogin[0] + "/repositories", headers=session)
checkError(repoData)
repoList = makeObject(repoData.json())
return repoList
################################################################
#RESOURCES
################################################################
#returns a list of resources you can iterate though with all, a set, or a range of resource numbers
def getResources(session, repo, param, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
resourceList = multipleRequest(session, repo, param, "resources", aspaceLogin)
return resourceList
#return resource object with number
def getResource(session, repo, number, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
resource = singleRequest(session, repo, number, "resources", aspaceLogin)
return resource
#return a resource object by id_0 field using the index
def getResourceID(session, repo, id_0, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
response = requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/search?page=1&aq={\"query\":{\"field\":\"identifier\", \"value\":\"" + id_0 + "\", \"jsonmodel_type\":\"field_query\"}}", headers=session)
checkError(response)
if len(response.json()["results"]) < 1:
print ("Error: could not find results for resource " + str(id_0))
else:
resourceID = response.json()["results"][0]["id"].split("/resources/")[1]
resource = singleRequest(session, repo, resourceID, "resources", aspaceLogin)
return resource
#returns a list of resources updated since ISO param
def getResourcesSince(session, repo, param, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
"""also accepts ISO (does not work)
if "-" in param:
if "t" in param.lower():
timeObject = datetime.strptime(param, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
timeObject = datetime.strptime(param, '%Y-%m-%d %H:%M:%S.%fZ')
param = (timeObject - datetime(1970, 1, 1)).total_seconds()
"""
resourceList = []
requestData = requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/resources?all_ids=true&modified_since=" + str(param), headers=session)
checkError(requestData)
requestList = requestData.json()
for resourceID in requestList:
resourceList.append(getResource(session, repo, resourceID, aspaceLogin))
return resourceList
#creates an empty resource
def makeResource():
resourceString = '{"jsonmodel_type":"resource","external_ids":[],"subjects":[],"linked_events":[],"extents":[],"dates":[],"external_documents":[],"rights_statements":[],"linked_agents":[],"restrictions":false,"revision_statements":[],"instances":[],"deaccessions":[],"related_accessions":[],"classifications":[],"notes":[],"title":"","id_0":"","level":"","language":"","ead_id":"","finding_aid_date":"","ead_location":""}'
emptyResource = json.loads(resourceString)
resourceObject = makeObject(emptyResource)
return resourceObject
def postResource(session, repo, resourceObject, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
path = "/repositories/" + str(repo) + "/resources"
if "uri" in resourceObject.keys():
if len(resourceObject.uri) > 0:
path = resourceObject.uri
try:
resourceString = json.dumps(resourceObject)
except:
import ast
resourceString = json.dumps(ast.literal_eval(str(resourceObject)))
postResource = requests.post(aspaceLogin[0] + path, data=resourceString, headers=session)
checkError(postResource)
return postResource.status_code
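# Illustrative usage sketch (not part of the original module): build an empty
# resource with makeResource(), fill in a minimal set of fields, and post it.
# The field values are placeholder examples and may not satisfy every
# repository's validation rules.
def _examplePostResource(session, repo):
    resource = makeResource()
    resource.title = "Example collection"
    resource.id_0 = "apap999"
    resource.level = "collection"
    resource.extents = [{"jsonmodel_type": "extent", "portion": "whole", "number": "1", "extent_type": "cubic_feet"}]
    resource.dates = [{"jsonmodel_type": "date", "date_type": "single", "begin": "2020", "label": "creation"}]
    return postResource(session, repo, resource)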
################################################################
#NAVIGATION
################################################################
#return resource tree object from resource Object
def getTree(session, resourceObject, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
uri = resourceObject.uri
treeData = requests.get(aspaceLogin[0] + str(uri) + "/tree", headers=session)
checkError(treeData)
treeObject = makeObject(treeData.json())
return treeObject
#return a list of child objects from a Resource object or an Archival Object
def getChildren(session, object, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
def findChild(tree, uri, childrenObject):
for child in tree["children"]:
if child["record_uri"] == uri:
childrenObject = makeObject(child)
elif len(child["children"]) < 1:
pass
else:
childrenObject = findChild(child, uri, childrenObject)
return childrenObject
if object.jsonmodel_type == "archival_object":
#get children of archival object
aoURI = object.uri
resourceURI = object.resource.ref
childrenData = requests.get(aspaceLogin[0] + str(resourceURI) + "/tree", headers=session)
checkError(childrenData)
#limit to only children below original archival object
childrenObject = findChild(childrenData.json(), aoURI, None)
if childrenObject is None:
print ("ERROR could not find archival object in resource tree, uri: " + aoURI + " ref_id: " + object.ref_id)
        #now just returns the parent object, even if .children is empty. This way it won't fail if there are no children
#elif len(childrenObject["children"]) < 1:
#print ("ERROR archival object has no children, uri: " + aoURI + " ref_id: " + object.ref_id)
else:
return childrenObject
else:
#get children of a resource
childrenData = getTree(session, object, aspaceLogin)
return childrenData
################################################################
#ARCHIVAL OBJECTS
################################################################
#return archival object by id
def getArchObj(session, recordUri, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
aoData = requests.get(aspaceLogin[0] + str(recordUri), headers=session)
checkError(aoData)
aoObject = makeObject(aoData.json())
return aoObject
#return archival object by Ref ID
def getArchObjID(session, repo, refID, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
params = {"ref_id[]": refID}
    aoData = requests.get(aspaceLogin[0] + "/repositories/" + str(repo) + "/find_by_id/archival_objects", headers=session, params=params)
checkError(aoData)
if len(aoData.json()["archival_objects"]) < 1:
print ("ERROR cound not find archival object for ref ID " + refID)
else:
recordUri = aoData.json()["archival_objects"][0]["ref"]
aoObject = getArchObj(session, recordUri, aspaceLogin)
return aoObject
#creates an empty archival object
def makeArchObj():
objectString = '{"jsonmodel_type":"archival_object","external_ids":[],"subjects":[],"linked_events":[],"extents":[],"dates":[],"external_documents":[],"rights_statements":[],"linked_agents":[],"restrictions_apply":false,"instances":[],"notes":[],"title":"","level":""}'
emptyArchObj = json.loads(objectString)
aoObject = makeObject(emptyArchObj)
return aoObject
def postArchObj(session, repo, aoObject, aspaceLogin = None):
#get ASpace Login info
aspaceLogin = getLogin(aspaceLogin)
aoString = json.dumps(aoObject)
if "ref_id" in | |
import sys
import asyncio
import pickle
from importlib import reload  # allows reloading of modules
import numpy as np
import pandas as pd
import scipy as scipy
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.decomposition import PCA
import ipywidgets as widgets
from IPython.display import display, clear_output
import pmagpy.pmag as pmag
import pmagpy.ipmag as ipmag
from pmagpy import contribution_builder as cb
model_circle_fast=pickle.load(open('model_circle_fast.pkl','rb'))
model_circle_slow=pickle.load(open('model_circle_slow.pkl','rb'))
def sufficient_statistics(ptrm, nrm):
"""
    takes lists of pTRM and NRM data and computes the sufficient statistics needed
    for later computations
Inputs
------
ptrm: list
list of ptrm data
nrm: list
list of nrm data
Returns
-------
dict containing mean ptrm and nrm, and covariances in xx, xy and yy.
"""
corr = np.cov( np.stack((ptrm, nrm), axis=0) )
return {'xbar': np.mean(ptrm), 'ybar': np.mean(nrm), 'S2xx': corr[0,0], 'S2yy': corr[1,1], 'S2xy': corr[0,1] }
def TaubinSVD(x,y):
"""
Function from PmagPy
algebraic circle fit
    input: x and y, separate sequences of point coordinates
    output: a, b, r, sigma. a and b are the center of the fitted circle, r is the radius,
    and sigma is the root-mean-square deviation of the points from the circle
Algebraic circle fit by Taubin
<NAME>, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
"""
X = np.array(list(map(float, x)))
Xprime=X
Y = np.array(list(map(float, y)))
Yprime=Y
XY = np.array(list(zip(X, Y)))
XY = np.array(XY)
X = XY[:,0] - np.mean(XY[:,0]) # norming points by x avg
Y = XY[:,1] - np.mean(XY[:,1]) # norming points by y avg
centroid = [np.mean(XY[:,0]), np.mean(XY[:,1])]
Z = X * X + Y * Y
Zmean = np.mean(Z)
Z0 = (Z - Zmean)/(2. * np.sqrt(Zmean))
ZXY = np.array([Z0, X, Y]).T
U, S, V = np.linalg.svd(ZXY, full_matrices=False) #
V = V.transpose()
A = V[:,2]
A[0] = A[0]/(2. * np.sqrt(Zmean))
A = np.concatenate([A, [(-1. * Zmean * A[0])]], axis=0)
a, b = (-1 * A[1:3]) / A[0] / 2 + centroid
r = np.sqrt(A[1]*A[1]+A[2]*A[2]-4*A[0]*A[3])/abs(A[0])/2
errors=[]
for i in list(range(0,len(Xprime)-1)):
errors.append((np.sqrt((Xprime[i]-a)**2+(Yprime[i]-b)**2)-r)**2)
sigma=np.sqrt((sum(errors))/(len(Xprime)-1))
return a,b,r,sigma
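# Illustrative sketch only: fit a circle to points scattered around the unit
# circle; a and b should come out near (0, 0) and r near 1. The test points
# are arbitrary values, not real data.
def _example_taubin_fit():
    theta = np.linspace(0.1, 2.0, 20)
    x = np.cos(theta) + np.random.normal(0, 0.01, theta.size)
    y = np.sin(theta) + np.random.normal(0, 0.01, theta.size)
    return TaubinSVD(x, y)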
def bestfit_line(ptrm, nrm):
"""
Returns the slope and intercept of the best fit line to a set of
pTRM and NRM data using York Regression
Inputs
------
ptrm: list or array
list of pTRM data
nrm: list or array
list of NRM data
Returns
-------
dictionary of slope and intercept for best fitting line.
"""
stat = sufficient_statistics(ptrm, nrm)
w = .5*(stat['S2xx'] - stat['S2yy'])/stat['S2xy']
m = -w-np.sqrt(w**2+1)
b = stat['ybar']-m*stat['xbar']
return {'slope': m, 'intercept': b }
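# Illustrative sketch only: for the noise-free Arai-style line NRM = 1 - 0.5*pTRM
# the York regression should recover a slope of -0.5 and an intercept of 1.0.
# The arrays are arbitrary test values, not real measurements.
def _example_bestfit_line():
    ptrm = np.linspace(0.0, 1.0, 11)
    nrm = 1.0 - 0.5 * ptrm
    return bestfit_line(ptrm, nrm)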
def get_drat(IZZI,IZZI_trunc,P):
"""Calculates the difference ratio (DRAT) of pTRM checks
(Selkin and Tauxe, 2000) to check for alteration
Inputs
------
IZZI: pandas.DataFrame
DataFrame object in BiCEP format of all in field and
zero field measurements for a specimen.
IZZI_trunc: pandas.DataFrame
DataFrame object- same as IZZI but truncated only for temperatures
used in interpretation
P: pandas.DataFrame
DataFrame object containing pTRM checks up to the
maximum temperature of an interpretation
Returns
-------
absdiff: float
maximum DRAT for all pTRM check measurements.
returns zero if the interpretation is not valid.
"""
try:
IZZI_reduced=IZZI[IZZI.temp_step.isin(P.temp_step)]
a=np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))*(IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM)))
b=a/np.abs(a)*np.sqrt(np.sum((IZZI_trunc.NRM-np.mean(IZZI_trunc.NRM))**2)/np.sum((IZZI_trunc.PTRM-np.mean(IZZI_trunc.PTRM))**2))
yint=np.mean(IZZI_trunc.NRM)-b*np.mean(IZZI_trunc.PTRM)
line={'slope':b,'intercept':yint}
xprime=0.5*(IZZI_trunc.PTRM+(IZZI_trunc.NRM-line['intercept'])/line['slope'])
yprime=0.5*(IZZI_trunc.NRM+line['slope']*IZZI_trunc.PTRM+line['intercept'])
scalefactor=np.sqrt((min(xprime)-max(xprime))**2+(min(yprime)-max(yprime))**2)
absdiff=max(np.abs(P.PTRM.values-IZZI_reduced.PTRM.values)/scalefactor)*100
return(absdiff)
except:
return 0
def get_mad(IZZI,pca):
"""
    Calculates the free Maximum Angle of Deviation (MAD) of Kirschvink (1980)
Inputs
------
IZZI: pandas.DataFrame
DataFrame object in BiCEP format of in field and
zero field measurements for a specimen (interpretation).
pca: scikitlearn.decomposition.PCA object
pca used to fit the vector direction.
Returns
-------
mad: float
        maximum angle of deviation for that interpretation
"""
try:
fit=pca.fit(IZZI.loc[:,'NRM_x':'NRM_z'].values).explained_variance_
return np.degrees(np.arctan(np.sqrt((fit[2]+fit[1])/(fit[0]))))
except:
return 0
def get_dang(NRM_trunc_dirs,pca):
"""
Calculates the Deviation Angle
Inputs
------
NRM_trunc_dirs: numpy.ndarray
Vector directions for zero field measurements for specimen
pca: scikitlearn.decomposition.PCA object
pca used to fit the vector direction.
Returns
-------
dang: float
        deviation angle for that interpretation
"""
try:
length, vector=pca.explained_variance_[0], pca.components_[0]
NRM_vect=np.mean(NRM_trunc_dirs,axis=0)
NRM_mean_magn=np.sqrt(sum(NRM_vect**2))
vector_magn=np.sqrt(sum(vector**2))
return(np.degrees(np.arccos(np.abs(np.dot(NRM_vect,vector)/(NRM_mean_magn*vector_magn)))))
except:
return(0)
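# Illustrative sketch only: a toy set of NRM vectors that decay along a single
# direction, used to exercise get_mad(). The column names follow the BiCEP
# format used above; the numbers are arbitrary, so expect a small MAD.
def _example_get_mad():
    toy = pd.DataFrame({'NRM_x': [1.00, 0.81, 0.59, 0.41, 0.20],
                        'NRM_y': [0.50, 0.40, 0.31, 0.19, 0.11],
                        'NRM_z': [0.25, 0.21, 0.14, 0.10, 0.05]})
    return get_mad(toy, PCA(n_components=3))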
def calculate_anisotropy_correction(IZZI):
"""
Calculates anisotropy correction factor for a
paleointensity interpretation, given an s tensor
Inputs
------
IZZI: pandas.DataFrame
DataFrame object in BiCEP format of in field and
zero field measurements for a specimen (interpretation).
Returns
-------
c: float
Anisotropy correction factor
"""
#Convert the s tensor into a numpy array
strlist=IZZI['s_tensor'].iloc[0].split(':')
slist=[]
for stringo in strlist:
slist.append(float(stringo.strip()))
stensor=np.array([[slist[0],slist[3],slist[5]],[slist[3],slist[1],slist[4]],[slist[5],slist[4],slist[2]]])
#Fit a PCA to the IZZI directions
NRM_trunc_dirs=IZZI.loc[:,'NRM_x':'NRM_z']
pca=PCA(n_components=3)
pca=pca.fit(NRM_trunc_dirs)
#Calculate the anisotropy correction factor (see Standard Paleointensity Definitions)
vector=pca.components_[0]
vector=vector/np.sqrt(np.sum(vector**2))
ancvector=np.matmul(np.linalg.inv(stensor),vector)
ancvector=ancvector/np.sqrt(np.sum(ancvector**2))
labmag=np.matmul(stensor,np.array([0,0,-1]))
ancmag=np.matmul(stensor,ancvector)
c=np.sqrt(np.sum(labmag**2))/np.sqrt(np.sum(ancmag**2))
return(c)
def calculate_NLT_correction(IZZI,c):
"""
Calculates the correction for non linear TRM for a paleointensity interpretation,
given the anisotropy and cooling rate corrections
Inputs
------
IZZI: pandas.DataFrame
DataFrame object in BiCEP format of in field and
zero field measurements for a specimen (interpretation).
c: float
Combined Anisotropy and Cooling Rate correction-
needed because the nonlinearity is applied after this.
Returns
-------
c: float
NLT correction factor
"""
a=np.sum((IZZI.PTRM-np.mean(IZZI.PTRM))*(IZZI.NRM-np.mean(IZZI.NRM)))
b=a/np.abs(a)*np.sqrt(np.sum((IZZI.NRM-np.mean(IZZI.NRM))**2)/np.sum((IZZI.PTRM-np.mean(IZZI.PTRM))**2))
beta=IZZI['NLT_beta'].iloc[0]
correction=c*IZZI.correction.iloc[0]
B_lab=IZZI.B_lab.iloc[0]*1e6
total_correction=(np.arctanh(correction*np.abs(b)*np.tanh(beta*B_lab)))/(np.abs(b)*beta*B_lab)
return(total_correction)
class ThellierData():
"""
Class which supports several methods using the BiCEP method in pandas.
Inputs
------
datafile: string for file name in BiCEP format
"""
def __init__(self,datafile):
self.data=pd.read_csv(datafile)
self.groupType='site'
try:
self.redo=pd.read_csv('thellier_gui.redo',delim_whitespace=True,header=None)
except:
self.redo=None
self.collections={siteName:SpecimenCollection(self,siteName,self.groupType) for siteName in self.data[self.groupType].unique()}
def __repr__(self):
reprstr='Set of Thellier Data Containing the '+'S'+self.groupType[1:]+'s:\n'
for key in self.collections.keys():
reprstr+= key+'\t('+str(len(self.collections[key].specimens))+' specimens)\n'
return(reprstr)
def __getitem__(self,item):
return(self.collections[item])
def switch_grouping(self):
if self.groupType=='site':
self.groupType='sample'
else:
self.groupType='site'
self.collections={siteName:SpecimenCollection(self,siteName,self.groupType) for siteName in self.data[self.groupType].unique()}
class SpecimenCollection():
"""
Collection of specimens (site or sample, from ThellierData Dataset)
Parameters
----------
parentData: ThellierData object
Set of Thellier Data the site/sample is derived from
collectionName: string
Name of specimen/site
key: string
Either 'site' for site or 'sample' for sample
"""
def __init__(self,parentData,collectionName,key):
self.name=collectionName
self.key=key
self.parentData=parentData
self.data=parentData.data[parentData.data[self.key]==collectionName]
self.specimens={specimenName:Specimen(self,specimenName) for specimenName in self.data.specimen.unique()}
self.fit=None
self.methcodes='IE-BICEP'
self.artist=None
def __repr__(self):
reprstr='Site containing the specimens:\n'
for specimenName in self.specimens.keys():
reprstr+=specimenName+'\n'
return(reprstr)
def __getitem__(self,item):
return(self.specimens[item])
def BiCEP_fit(self,n_samples=30000,priorstd=5,model=None,**kwargs):
"""
        Performs the fitting routine using the BiCEP method for all active specimens in this site/sample.
        Inputs
        ------
        n_samples: int
            total number of MCMC iterations per chain (half are used as warmup)
        priorstd: float
            prior standard deviation passed to the Stan model
        model: compiled Stan model or None
            model to sample from; if None, a precompiled circle model is chosen
            automatically based on the number of active specimens
"""
minPTRMs=[]
minNRMs=[]
B_lab_list=[]
klist=[]
NRM0s=[]
pTRMsList=np.array([])
NRMsList=np.array([])
lengths=[]
philist=[]
dist_to_edgelist=[]
B_ancs=[]
dmaxlist=[]
PTRMmaxlist=[]
centroidlist=[]
i=0
#try:
for specimen in self.specimens.values():
if specimen.active==True:
specimen.save_changes()
minPTRM,minNRM,PTRMmax,k,phi,dist_to_edge,sigma,PTRMS,NRMS=specimen.BiCEP_prep()
NRM0=specimen.NRM0
minPTRMs.append(minPTRM)
minNRMs.append(minNRM)
line=bestfit_line(specimen.IZZI_trunc.PTRM,specimen.IZZI_trunc.NRM)
B_anc=-line['slope']*specimen.B_lab*specimen.IZZI_trunc.correction.iloc[0]
B_ancs.append(B_anc)
Pi,Pj=np.meshgrid(PTRMS,PTRMS)
Ni,Nj=np.meshgrid(NRMS,NRMS)
dmax=np.amax(np.sqrt((Pi-Pj)**2+(Ni-Nj)**2))
centroid=np.sqrt(np.mean(PTRMS)**2+np.mean(NRMS)**2)
B_lab_list.append(specimen.B_lab)
klist.append(k)
philist.append(phi)
dist_to_edgelist.append(dist_to_edge)
NRM0s.append(NRM0)
pTRMsList=np.append(pTRMsList,PTRMS)
NRMsList=np.append(NRMsList,NRMS)
lengths.append(int(len(PTRMS)))
dmaxlist.append(dmax)
PTRMmaxlist.append(PTRMmax)
centroidlist.append(centroid)
i+=1
if model==None:
if i<7:
model_circle=model_circle_slow
else:
model_circle=model_circle_fast
else:
model_circle=model
fit_circle=model_circle.sampling (
data={'I':len(pTRMsList),'M':len(lengths),'PTRM':pTRMsList,'NRM':NRMsList,'N':lengths,'PTRMmax':PTRMmaxlist,'B_labs':B_lab_list,'dmax':np.sqrt(dmaxlist),'centroid':centroidlist,'priorstd':priorstd},iter=n_samples,warmup=int(n_samples/2),
init=[{'k_scale':np.array(klist)*np.array(dist_to_edgelist),'phi':philist,'dist_to_edge':dist_to_edgelist,'int_real':B_ancs}]*4,**kwargs)
self.fit=fit_circle
#except:
#print('Something went wrong trying to do the BiCEP fit, try changing your temperature range')
def save_magic_tables(self):
"""
        Saves the fitted site and specimen level results for this site/sample to the MagIC sites and specimens tables
Inputs
------
None
Returns
-------
None
"""
fit=self.fit
sitestable=pd.read_csv(self.key+'s.txt',skiprows=1,sep='\t')
sitestable.loc[sitestable[self.key]==self.name,'int_abs_min']=round(np.percentile(fit['int_site'],2.5),1)/1e6
sitestable.loc[sitestable[self.key]==self.name,'int_abs_max']=round(np.percentile(fit['int_site'],97.5),1)/1e6
sitestable.loc[sitestable[self.key]==self.name,'int_abs']=round(np.percentile(fit['int_site'],50),1)/1e6
specimenstable=pd.read_csv('specimens.txt',skiprows=1,sep='\t')
speclist=[spec for spec in self.specimens.keys() if self.specimens[spec].active==True]
for i in range(len(speclist)):
specimen=speclist[i]
specfilter=(~specimenstable.method_codes.str.contains('LP-AN').fillna(False))&(specimenstable.specimen==specimen)
specimenstable.loc[specfilter,'int_abs_min']=round(np.percentile(fit['int_real'][:,i],2.5),1)/1e6
specimenstable.loc[specfilter,'int_abs_max']=round(np.percentile(fit['int_real'][:,i],97.5),1)/1e6
specimenstable.loc[specfilter,'int_abs']=round(np.percentile(fit['int_real'][:,i],50),1)/1e6
specimenstable.loc[specfilter,'int_k_min']=round(np.percentile(fit['k'][:,i],2.5),3)
specimenstable.loc[specfilter,'int_k_max']=round(np.percentile(fit['k'][:,i],97.5),3)
specimenstable.loc[specfilter,'int_k']=round(np.percentile(fit['k'][:,i],50),3)
specimenstable.loc[specfilter,'meas_step_min']=self[specimen].savedLowerTemp
specimenstable.loc[specfilter,'meas_step_max']=self[specimen].savedUpperTemp
method_codes=self[specimen].methcodes.split(':')
method_codes=list(set(method_codes))
newstr=''
for code in method_codes[:-1]:
newstr+=code
newstr+=':'
newstr+=method_codes[-1]
specimenstable.loc[specfilter,'method_codes']=self[specimen].methcodes
extra_columns=self[specimen].extracolumnsdict
for col in extra_columns.keys():
specimenstable.loc[specfilter,col]=extra_columns[col]
sitestable.loc[sitestable.site==self.name,'method_codes']=self.methcodes
specimenstable['meas_step_unit']='Kelvin'
sitestable=sitestable.fillna('')
specimenstable=specimenstable.fillna('')
sitesdict=sitestable.to_dict('records')
specimensdict=specimenstable.to_dict('records')
pmag.magic_write('sites.txt',sitesdict,'sites')
pmag.magic_write('specimens.txt',specimensdict,'specimens')
def regplot(self,ax,legend=False,title=None):
"""
Plots B vs k for all specimens in a site given a BiCEP or unpooled fit
Inputs
------
ax: matplotlib axis
axes to plot to
legend: bool
If set to True, plots a legend
title: str
Title for plot. Does not plot title if set to None.
"""
B_lab_list=[]
for specimen in self.specimens.values():
B_lab_list.append(specimen.B_lab)
try:
Bs=self.fit['int_real']
ks=self.fit['k']
mink,maxk=np.amin(ks),np.amax(ks)
minB,maxB=self.fit['c']*mink+self.fit['int_site'],self.fit['c']*maxk+self.fit['int_site']
c=np.random.choice(range(len(minB)),100)
ax.plot([mink,maxk],[minB[c],maxB[c]],color='skyblue',alpha=0.12)
except:
Bs=self.fit['slope']*np.array(B_lab_list).T
ks=self.fit['k']
ax.set_xlabel(r'$\vec{k}$');
ax.plot(np.percentile(ks,(2.5,97.5),axis=0),[np.median(Bs,axis=0),np.median(Bs,axis=0)],'k')
ax.plot([np.median(ks,axis=0),np.median(ks,axis=0)],np.percentile(Bs,(2.5,97.5),axis=0),'k')
ax.plot(np.median(ks,axis=0),np.median(Bs,axis=0),'o',markerfacecolor='lightgreen',markeredgecolor='k')
ax.axvline(0,color='k',linewidth=1)
if title!=None:
ax.set_title(title,fontsize=20,loc='left')
def get_specimen_rhats(self):
"""
        Finds the worst Rhat value for each specimen and assigns it to that specimen
"""
rhats=self.fit.summary()['summary'][:,-1]
rhat_params=self.fit.summary()['summary_rownames']
specimenlist=[specimen for specimen in self.specimens.values() if specimen.active==True]
for i in list(range(len(specimenlist))):
rhats_specimen=rhats[np.char.find(rhat_params,'['+str(i+1)+']')!=-1]
worst_rhat_specimen=rhats_specimen[np.abs(rhats_specimen-1)==max(np.abs(rhats_specimen-1))][0]
specimenlist[i].rhat=worst_rhat_specimen
def histplot(self,ax,**kwargs):
"""
Plots a histogram of the site level paleointensity estimate.
Inputs
------
**kwargs:
arguments to be passed to the histogram plot
Returns
-------
None
"""
ax.hist(self.fit['int_site'],bins=100,color='skyblue',density=True)
minB,maxB=np.percentile(self.fit['int_site'],(2.5,97.5))
ax.plot([minB,maxB],[0,0],'k',linewidth=4)
        ax.set_xlabel(r'Intensity ($\mu$T)')
ax.set_ylabel('Probability Density')
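# Illustrative sketch only: the intended end-to-end workflow with these classes.
# Load a BiCEP-format csv, run the hierarchical fit for one site, and plot the
# results. The file name and site name are placeholder examples, and sampling
# can take several minutes.
def _example_site_fit(datafile='arai_data.csv', site='site1'):
    data = ThellierData(datafile)
    collection = data[site]
    collection.BiCEP_fit(n_samples=3000)
    fig, (ax0, ax1) = plt.subplots(1, 2)
    collection.regplot(ax0)
    collection.histplot(ax1)
    return collection.fit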
class Specimen():
"""
Specimen from a given site or sample SpecimenCollection object.
Parameters
----------
parentCollection: SpecimenCollection object
Site/Sample the specimen is derived from.
specimenName: string
Name of specimen
"""
def __init__(self,parentCollection,specimenName):
#Inherent properties
self.parentCollection=parentCollection #Site or sample this specimen belongs to
self.name=specimenName
self.data=parentCollection.data[parentCollection.data.specimen==specimenName]
        self.active=True #Used for BiCEP GUI- set to false if
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
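# Illustrative sketch only (not part of the generated tables): assuming each
# (rot, trans_num, trans_den) triple encodes the fractional-coordinate mapping
# x' = rot . x + trans_num / trans_den, this hypothetical helper expands one
# point over a list of such operations, e.g.
# _symmetry_equivalents(transformations, [0.1, 0.2, 0.3]).
def _symmetry_equivalents(ops, fractional_point):
    point = N.array([float(c) for c in fractional_point])
    equivalents = []
    for rot, trans_num, trans_den in ops:
        shift = N.array([float(n) / d for n, d in zip(trans_num, trans_den)])
        equivalents.append(N.dot(rot, point) + shift)
    return equivalents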
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
* s * iv(k,
b * s) * kv(
(-1) + k, s) + (
-3) * b * k * s * iv(k, b * s) * kv(
(-1) + k, s) + (-8) * b * k ** 2 * iv(
k,
b * s) * kv(
k, s) + 2 * iv((-1) + k,
b * s) * (
((-1) + k) * s * kv((-2) + k, s) + (
(-1) + 3 * k ** 2) * kv((-1) + k,
s) + (
-1) * ((-1) + k) * s * kv(k, s)) + (
-4) * b * s * iv(
k, b * s) * kv(1 + k,
s) + 4 * b * k * s * iv(
k, b * s) * kv(1 + k, s) + 8 * iv(
1 + k,
b * s) * kv(
1 + k, s) + (
-4) * k * iv(
1 + k, b * s) * kv(1 + k, s)) + iv(k,
s) ** 2 * (
(-2) * iv((-1) + k, b * s) * (
(4 * k * s + s ** 3) * kv((-2) + k, s) + (
4 * k + 4 * k ** 2 + s ** 2 + 3 * k * s ** 2) * kv(
(-1) + k, s) + (-1) * s ** 3 * kv(
k,
s)) + s * (
b * (4 * k + s ** 2) * iv((-2) + k,
b * s) * kv(
(-1) + k,
s) + 8 * iv(
1 + k, b * s) * (
(-1) * k * kv(k, s) + s * kv(1 + k,
s)) + b * iv(
k,
b * s) * (
3 * (4 * k + s ** 2) * kv((-1) + k,
s) + (
-4) * s * (
(-2) * k * kv(k, s) + s * kv(
1 + k,
s))))))
psi1_k0 = lambda s, b: (1 / 16) * pi ** (-2) * iv(1, s) * (
(-4) * s ** 2 * (iv(0, s) + iv(2, s)) * (
b * iv(0, b * s) * kv(0, s) + (-1) * iv(1, b * s) * kv(1, s)) + (
-8) * s * (iv(0, s) + s * iv(1, s)) * (
b * iv(0, b * s) * kv(1, s) + (-1) * iv(1, b * s) * kv(2, s)))
pi1_k0 = lambda s, b: (1 / 2) * pi ** (-2) * iv(1, s) * (
b * iv(0, b * s) + (-1) * s * iv(1, b * s) * (
iv(1, s) * kv(1, s) + iv(0, s) * kv(2, s)))
self._psi_xn1 = psi1(self._k, self._xn, self._b)
self._psi_yn1 = psi1(self._k, self._yn, self._b)
self._pi_xn1 = pi1(self._k, self._xn, self._b)
self._pi_yn1 = pi1(self._k, self._yn, self._b)
self._omega_xn1 = omega1(self._k, self._xn, self._b)
self._omega_yn1 = omega1(self._k, self._yn, self._b)
self._psi_xn1_k0 = psi1_k0(self._xn_k0, self._b)
self._omega_xn1_k0 = 0
self._pi_xn1_k0 = pi1_k0(self._xn_k0, self._b)
self._finish1 = True
return True
def _solve_prepare2(self):
err_msg = 'run _solve_prepare_yn first. '
assert self._finish_yn, err_msg
psi2 = lambda k, s, b: (1 / 16) * pi ** (-2) * (
s ** 2 * ((iv((-2) + k, s) + iv(k, s)) * iv(1 + k, s) + iv((-1) + k, s) * (
iv(k, s) + iv(2 + k, s))) * (
iv((-1) + k, b * s) * kv((-1) + k, s) + (-1) * iv(1 + k, b * s) * kv(1 + k,
s)) + (
-4) * b ** (-1) * (s * iv((-1) + k, s) + (-1) * ((-1) + k) * iv(k, s)) * (
b * ((-2) + k) * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k, s) + (
-1) * k * iv(k, b * s) * iv(
1 + k, s) * kv(k, s) + iv((-1) + k, s) * (
(-1) * k * iv(k, b * s) * kv(k, s) + b * (2 + k) * iv(1 + k,
b * s) * kv(
1 + k, s))))
pi2 = lambda k, s, b: (1 / 4) * b ** (-1) * pi ** (-2) * (
iv(k, s) * iv(1 + k, s) * (
b * ((-2) + k) * iv((-1) + k, b * s) * kv((-1) + k, s) + (-1) * k * iv(k,
b * s) * kv(
k, s)) + iv(
(-1) + k,
s) * (
(-1) * b * s * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k,
s) + b * s * iv(
1 + k, s) * iv(1 + k,
b * s) * kv(
1 + k, s) + iv(k, s) * (
(-1) * k * iv(k, b * s) * kv(k, s) + b * (2 + k) * iv(1 + k,
b * s) * kv(
1 + k, s))))
omega2 = lambda k, s, b: (1 / 2) * b ** (-1) * pi ** (-2) * s ** (-1) * (
(-1) * b * s ** 2 * iv((-1) + k, s) ** 2 * (
iv((-1) + k, b * s) * kv((-1) + k, s) + iv(1 + k, b * s) * kv(1 + k,
s)) + b * s * iv(
(-1) + k, s) * iv(
k,
s) * (
((-2) + 3 * k) * iv((-1) + k, b * s) * kv((-1) + k, s) + ((-2) + k) * iv(
1 + k, b * s) * kv(1 + k,
s)) + iv(k,
s) ** 2 * (
b * (4 * k + (-2) * k ** 2 + s ** 2) * iv((-1) + k, b * s) * kv((-1) + k,
s) + 2 * k ** 2 * iv(
k,
b * s) * kv(
k, s) + b * s ** 2 * iv(1 + k, b * s) * kv(1 + k, s)))
omega2_k0 = lambda s, b: pi ** (-2) * (
s * iv(0, s) ** 2 + (-2) * iv(0, s) * iv(1, s) + (-1) * s * iv(1, s) ** 2) * iv(1,
b * s) * kv(
1, s)
self._psi_xn2 = psi2(self._k, self._xn, self._b)
self._psi_yn2 = psi2(self._k, self._yn, self._b)
self._pi_xn2 = pi2(self._k, self._xn, self._b)
self._pi_yn2 = pi2(self._k, self._yn, self._b)
self._omega_xn2 = omega2(self._k, self._xn, self._b)
self._omega_yn2 = omega2(self._k, self._yn, self._b)
self._psi_yn2_k0 = 0
self._omega_yn2_k0 = omega2_k0(self._yn_k0, self._b)
self._pi_yn2_k0 = 0
self._finish2 = True
return True
def _solve_prepare3(self):
err_msg = 'run _solve_prepare_xn first. '
assert self._finish_xn, err_msg
psi3 = lambda k, s, b: (1 / 8) * pi ** (-2) * s * (
((iv((-2) + k, s) + iv(k, s)) * iv(1 + k, s) + iv((-1) + k, s) * (
iv(k, s) + iv(2 + k, s))) * (
(-1) * b * s * iv((-1) + k, b * s) * kv(k, s) + iv(k, b * s) * (
s * kv((-1) + k, s) + 2 * ((-1) + k) * kv(k, s))) + (-2) * (
s * iv((-1) + k, s) + (-1) * ((-1) + k) * iv(k, s)) * (
b * iv((-1) + k, b * s) * iv(1 + k, s) * kv((-1) + k, s) + (-1) * iv(k,
b * s) * iv(
1 + k, s) * kv(k,
s) + iv(
(-1) + k, s) * (
                (-1) * iv(k, b * s) * kv(k, s) + b * iv(1 + k, b
# Copyright (c) 2018, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
load_functions
--------------
Module for top-level functions that open MRC files and form the main API of
the package.
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import os
from .bzip2mrcfile import Bzip2MrcFile
from .constants import MAP_ID, MAP_ID_OFFSET_BYTES
from .future_mrcfile import FutureMrcFile
from .gzipmrcfile import GzipMrcFile
from .mrcfile import MrcFile
from .mrcmemmap import MrcMemmap
from . import utils
def new(name, data=None, compression=None, overwrite=False):
"""Create a new MRC file.
Args:
name: The file name to use.
data: Data to put in the file, as a :class:`numpy array
<numpy.ndarray>`. The default is :data:`None`, to create an empty
file.
compression: The compression format to use. Acceptable values are:
:data:`None` (the default; for no compression), ``'gzip'`` or
``'bzip2'``.
It's good practice to name compressed files with an appropriate
extension (for example, ``.mrc.gz`` for gzip) but this is not
enforced.
overwrite: Flag to force overwriting of an existing file. If
:data:`False` and a file of the same name already exists, the file
is not overwritten and an exception is raised.
Returns:
An :class:`~mrcfile.mrcfile.MrcFile` object (or a
subclass of it if ``compression`` is specified).
Raises:
:exc:`ValueError`: If the file already exists and overwrite is
:data:`False`.
:exc:`ValueError`: If the compression format is not recognised.
Warns:
RuntimeWarning: If the data array contains Inf or NaN values.
"""
if compression == 'gzip':
NewMrc = GzipMrcFile
elif compression == 'bzip2':
NewMrc = Bzip2MrcFile
elif compression is not None:
raise ValueError("Unknown compression format '{0}'"
.format(compression))
else:
NewMrc = MrcFile
mrc = NewMrc(name, mode='w+', overwrite=overwrite)
if data is not None:
mrc.set_data(data)
return mrc
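# Illustrative usage sketch (not part of this module's API): create a small
# gzip-compressed file and fill it with a zero volume. The file name and array
# shape are arbitrary examples; numpy is assumed to be available.
def _example_new_file(name='example.mrc.gz'):
    import numpy as np
    with new(name, compression='gzip', overwrite=True) as mrc:
        mrc.set_data(np.zeros((4, 5, 6), dtype=np.float32))
    return name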
def open(name, mode='r', permissive=False, header_only=False): # @ReservedAssignment
"""Open an MRC file.
This function opens both normal and compressed MRC files. Supported
compression formats are: gzip, bzip2.
It is possible to use this function to create new MRC files (using mode
``w+``) but the :func:`new` function is more flexible.
This function offers a permissive read mode for attempting to open corrupt
or invalid files. In permissive mode, :mod:`warnings` are issued instead of
exceptions if problems with the file are encountered. See
:class:`~mrcfile.mrcinterpreter.MrcInterpreter` or the
:doc:`usage guide <../usage_guide>` for more information.
Args:
name: The file name to open.
mode: The file mode to use. This should be one of the following: ``r``
for read-only, ``r+`` for read and write, or ``w+`` for a new empty
file. The default is ``r``.
permissive: Read the file in permissive mode. The default is
:data:`False`.
header_only: Only read the header (and extended header) from the file.
The default is :data:`False`.
Returns:
An :class:`~mrcfile.mrcfile.MrcFile` object (or a
:class:`~mrcfile.gzipmrcfile.GzipMrcFile` object if the file is
gzipped).
Raises:
:exc:`ValueError`: If the mode is not one of ``r``, ``r+`` or ``w+``.
:exc:`ValueError`: If the file is not a valid MRC file and
``permissive`` is :data:`False`.
:exc:`ValueError`: If the mode is ``w+`` and the file already exists.
(Call :func:`new` with ``overwrite=True`` to deliberately overwrite
an existing file.)
:exc:`OSError`: If the mode is ``r`` or ``r+`` and the file does not
exist.
Warns:
RuntimeWarning: If the file appears to be a valid MRC file but the data
block is longer than expected from the dimensions in the header.
RuntimeWarning: If the file is not a valid MRC file and ``permissive``
is :data:`True`.
RuntimeWarning: If the header's ``exttyp`` field is set to a known
value but the extended header's size is not a multiple of the
number of bytes in the corresponding dtype.
"""
NewMrc = MrcFile
if os.path.exists(name):
with io.open(name, 'rb') as f:
start = f.read(MAP_ID_OFFSET_BYTES + len(MAP_ID))
# Check for map ID string to avoid trying to decompress normal files
# where the nx value happens to include the magic number for a
# compressed format. (This still risks failing to correctly decompress
# compressed files which happen to have 'MAP ' at position 208, but
# that is less likely and if it does occur, the CompressedMrcFile
# class can always be used directly instead.)
if start[-len(MAP_ID):] != MAP_ID:
if start[:2] == b'\x1f\x8b':
NewMrc = GzipMrcFile
elif start[:2] == b'BZ':
NewMrc = Bzip2MrcFile
return NewMrc(name, mode=mode, permissive=permissive,
header_only=header_only)
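# Illustrative usage sketch (not part of this module's API): open an existing
# file read-only and record the data shape and header mode before the file is
# closed. The file name is a placeholder example.
def _example_inspect_file(name='example.mrc.gz'):
    with open(name, mode='r') as mrc:
        shape = mrc.data.shape
        mode = int(mrc.header.mode)
    return shape, mode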
def open_async(name, mode='r', permissive=False):
"""Open an MRC file asynchronously in a separate thread.
This allows a file to be opened in the background while the main thread
continues with other work. This can be a good way to improve performance if
the main thread is busy with intensive computation, but will be less
effective if the main thread is itself busy with disk I/O.
Multiple files can be opened in the background simultaneously. However,
this implementation is relatively crude; each call to this function will
start a new thread and immediately use it to start opening a file. If you
try to open many large files at the same time, performance will decrease as
all of the threads attempt to access the disk at once. You'll also risk
running out of memory to store the data from all the files.
This function returns a :class:`~mrcfile.future_mrcfile.FutureMrcFile`
object, which deliberately mimics the API of the
:class:`~concurrent.futures.Future` object from Python 3's
:mod:`concurrent.futures` module. (Future versions of this library might
return genuine :class:`~concurrent.futures.Future` objects instead.)
To get the real :class:`~mrcfile.mrcfile.MrcFile` object from a
:class:`~mrcfile.future_mrcfile.FutureMrcFile`, call
:meth:`~mrcfile.future_mrcfile.FutureMrcFile.result`. This will block until
the file has been read and the :class:`~mrcfile.mrcfile.MrcFile` object is
ready. To check if the :class:`~mrcfile.mrcfile.MrcFile` is ready without
blocking, call :meth:`~mrcfile.future_mrcfile.FutureMrcFile.running` or
:meth:`~mrcfile.future_mrcfile.FutureMrcFile.done`.
Args:
name: The file name to open.
mode: The file mode (one of ``r``, ``r+`` or ``w+``).
permissive: Read the file in permissive mode. The default is
:data:`False`.
Returns:
A :class:`~mrcfile.future_mrcfile.FutureMrcFile` object.
"""
return FutureMrcFile(open, (name,), dict(mode=mode, permissive=permissive))
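# Illustrative sketch: open a file in the background and only block when the
# data is actually needed. The path is hypothetical.
def _example_open_async(path='example.mrc'):
    future = open_async(path, mode='r')
    # ... other work could happen here while the file is read ...
    mrc = future.result()  # blocks until the MrcFile is ready
    return mrc.data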
def mmap(name, mode='r', permissive=False):
"""Open a memory-mapped MRC file.
This allows much faster opening of large files, because the data is only
accessed on disk when a slice is read or written from the data array. See
the :class:`~mrcfile.mrcmemmap.MrcMemmap` class documentation for more
information.
Because the memory-mapped data array accesses the disk directly, compressed
files cannot be opened with this function. In all other ways, :func:`mmap`
behaves in exactly the same way as :func:`open`. The
:class:`~mrcfile.mrcmemmap.MrcMemmap` object returned by this function can
be used in exactly the same way as a normal
:class:`~mrcfile.mrcfile.MrcFile` object.
Args:
name: The file name to open.
mode: The file mode (one of ``r``, ``r+`` or ``w+``).
permissive: Read the file in permissive mode. The default is
:data:`False`.
Returns:
An :class:`~mrcfile.mrcmemmap.MrcMemmap` object.
"""
return MrcMemmap(name, mode=mode, permissive=permissive)
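# Illustrative sketch: memory-map a large uncompressed file and copy out a
# single z-slice without reading the whole data block. The path is hypothetical.
def _example_mmap_slice(path='large_volume.mrc'):
    import numpy as np
    with mmap(path, mode='r') as mrc:
        return np.array(mrc.data[0])  # force a copy before the file is closed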
def new_mmap(name, shape, mrc_mode=0, fill=None, overwrite=False):
"""Create a new, empty memory-mapped MRC file.
This function is useful for creating very large files. The initial contents
of the data array can be set with the ``fill`` parameter if needed, but be
aware that filling a large array can take a long time.
If ``fill`` is not set, the new data array's contents are unspecified and
system-dependent. (Some systems fill a new empty mmap with zeros, others
fill it with the bytes from the disk at the newly-mapped location.) If you
are definitely going to fill the entire array with new data anyway you can
safely leave ``fill`` as :data:`None`, otherwise it is advised to use a
sensible fill value (or ensure you are on a system that fills new mmaps
with a reasonable default value).
Args:
name: The file name to use.
shape: The shape of the data array to open, as a 2-, 3- or 4-tuple of
ints. For example, ``(nz, ny, nx)`` for a new 3D volume, or
``(ny, nx)`` for a new 2D image.
mrc_mode: The MRC mode to use for the new file. One of 0, 1, 2, 4 or 6,
which correspond to numpy dtypes as follows:
* mode 0 -> int8
* mode 1 -> int16
* mode 2 -> float32
* mode 4 -> complex64
            * mode 6 -> uint16
    dirVec *= stem.segL
# Get the end point position
end_co = stem.p.co.copy()
stem.spline.bezier_points.add()
newPoint = stem.spline.bezier_points[-1]
(newPoint.co,newPoint.handle_left_type,newPoint.handle_right_type) = (end_co + dirVec,hType,hType)
newPoint.radius = stem.radS*(1 - (stem.seg + 1)/stem.segMax) + stem.radE*((stem.seg + 1)/stem.segMax)
# There are some cases where a point cannot have handles as VECTOR straight away, set these now.
if numSplit != 0:
tempPoint = stem.spline.bezier_points[-2]
(tempPoint.handle_left_type,tempPoint.handle_right_type) = ('VECTOR','VECTOR')
if len(stem.spline.bezier_points) == 2:
tempPoint = stem.spline.bezier_points[0]
(tempPoint.handle_left_type,tempPoint.handle_right_type) = ('VECTOR','VECTOR')
# Update the last point in the spline to be the newly added one
stem.updateEnd()
#return splineList
def genLeafMesh(leafScale,leafScaleX,loc,quat,index,downAngle,downAngleV,rotate,rotateV,oldRot,bend,leaves, leafShape):
if leafShape == 'hex':
verts = [Vector((0,0,0)),Vector((0.5,0,1/3)),Vector((0.5,0,2/3)),Vector((0,0,1)),Vector((-0.5,0,2/3)),Vector((-0.5,0,1/3))]
edges = [[0,1],[1,2],[2,3],[3,4],[4,5],[5,0],[0,3]]
faces = [[0,1,2,3],[0,3,4,5]]
elif leafShape == 'rect':
verts = [Vector((1,0,0)),Vector((1,0,1)),Vector((-1,0,1)),Vector((-1,0,0))]
edges = [[0,1],[1,2],[2,3],[3,0]]
faces = [[0,1,2,3],]
#faces = [[0,1,5],[1,2,4,5],[2,3,4]]
vertsList = []
facesList = []
# If the special -ve flag is used we need a different rotation of the leaf geometry
if leaves < 0:
rotMat = Matrix.Rotation(oldRot,3,'Y')
oldRot += rotate/(abs(leaves)-1)
else:
oldRot += rotate+uniform(-rotateV,rotateV)
downRotMat = Matrix.Rotation(downAngle+uniform(-downAngleV,downAngleV),3,'X')
rotMat = Matrix.Rotation(oldRot,3,'Z')
normal = yAxis.copy()
#dirVec = zAxis.copy()
orientationVec = zAxis.copy()
    # If the bending of the leaves is used we need to rotate them differently
if (bend != 0.0) and (leaves >= 0):
# normal.rotate(downRotMat)
# orientationVec.rotate(downRotMat)
#
# normal.rotate(rotMat)
# orientationVec.rotate(rotMat)
normal.rotate(quat)
orientationVec.rotate(quat)
thetaPos = atan2(loc.y,loc.x)
thetaBend = thetaPos - atan2(normal.y,normal.x)
rotateZ = Matrix.Rotation(bend*thetaBend,3,'Z')
normal.rotate(rotateZ)
orientationVec.rotate(rotateZ)
phiBend = atan2((normal.xy).length,normal.z)
orientation = atan2(orientationVec.y,orientationVec.x)
rotateZOrien = Matrix.Rotation(orientation,3,'X')
rotateX = Matrix.Rotation(bend*phiBend,3,'Z')
rotateZOrien2 = Matrix.Rotation(-orientation,3,'X')
# For each of the verts we now rotate and scale them, then append them to the list to be added to the mesh
for v in verts:
v.z *= leafScale
v.x *= leafScaleX*leafScale
if leaves > 0:
v.rotate(downRotMat)
v.rotate(rotMat)
v.rotate(quat)
if (bend != 0.0) and (leaves > 0):
# Correct the rotation
v.rotate(rotateZ)
v.rotate(rotateZOrien)
v.rotate(rotateX)
v.rotate(rotateZOrien2)
#v.rotate(quat)
for v in verts:
v += loc
vertsList.append([v.x,v.y,v.z])
for f in faces:
facesList.append([f[0] + index,f[1] + index,f[2] + index,f[3] + index])
return vertsList,facesList,oldRot
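# Illustrative sketch (assumes bpy is available, as in the rest of this script):
# the vertex and face lists returned by genLeafMesh can be fed straight into
# Mesh.from_pydata to build a leaf object. Object/mesh names are made up here.
def buildLeafObject(vertsList, facesList):
    leafMesh = bpy.data.meshes.new('leavesDemo')
    leafMesh.from_pydata(vertsList, (), facesList)
    leafMesh.update()
    leafObj = bpy.data.objects.new('leavesDemo', leafMesh)
    bpy.context.scene.objects.link(leafObj)
    return leafObj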
def addTree(props):
global splitError
#startTime = time.time()
# Set the seed for repeatable results
seed(props.seed)#
# Set all other variables
levels = props.levels#
length = props.length#
lengthV = props.lengthV#
branches = props.branches#
curveRes = props.curveRes#
curve = toRad(props.curve)#
curveV = toRad(props.curveV)#
curveBack = toRad(props.curveBack)#
baseSplits = props.baseSplits#
segSplits = props.segSplits#
splitAngle = toRad(props.splitAngle)#
splitAngleV = toRad(props.splitAngleV)#
scale = props.scale#
scaleV = props.scaleV#
attractUp = props.attractUp#
shape = int(props.shape)#
baseSize = props.baseSize
ratio = props.ratio
taper = props.taper#
ratioPower = props.ratioPower#
downAngle = toRad(props.downAngle)#
downAngleV = toRad(props.downAngleV)#
rotate = toRad(props.rotate)#
rotateV = toRad(props.rotateV)#
scale0 = props.scale0#
scaleV0 = props.scaleV0#
prune = props.prune#
pruneWidth = props.pruneWidth#
pruneWidthPeak = props.pruneWidthPeak#
prunePowerLow = props.prunePowerLow#
prunePowerHigh = props.prunePowerHigh#
pruneRatio = props.pruneRatio#
leafScale = props.leafScale#
leafScaleX = props.leafScaleX#
leafShape = props.leafShape
bend = props.bend#
leafDist = int(props.leafDist)#
bevelRes = props.bevelRes#
resU = props.resU#
useArm = props.useArm
frameRate = props.frameRate
windSpeed = props.windSpeed
windGust = props.windGust
armAnim = props.armAnim
leafObj = None
# Some effects can be turned ON and OFF, the necessary variables are changed here
if not props.bevel:
bevelDepth = 0.0
else:
bevelDepth = 1.0
if not props.showLeaves:
leaves = 0
else:
leaves = props.leaves
if props.handleType == '0':
handles = 'AUTO'
else:
handles = 'VECTOR'
for ob in bpy.data.objects:
ob.select = False
childP = []
stemList = []
# Initialise the tree object and curve and adjust the settings
cu = bpy.data.curves.new('tree','CURVE')
treeOb = bpy.data.objects.new('tree',cu)
bpy.context.scene.objects.link(treeOb)
cu.dimensions = '3D'
cu.fill_mode = 'FULL'
cu.bevel_depth = bevelDepth
cu.bevel_resolution = bevelRes
# Fix the scale of the tree now
scaleVal = scale + uniform(-scaleV,scaleV)
# If pruning is turned on we need to draw the pruning envelope
if prune:
enHandle = 'VECTOR'
enNum = 128
enCu = bpy.data.curves.new('envelope','CURVE')
enOb = bpy.data.objects.new('envelope',enCu)
enOb.parent = treeOb
bpy.context.scene.objects.link(enOb)
newSpline = enCu.splines.new('BEZIER')
newPoint = newSpline.bezier_points[-1]
newPoint.co = Vector((0,0,scaleVal))
(newPoint.handle_right_type,newPoint.handle_left_type) = (enHandle,enHandle)
# Set the coordinates by varying the z value, envelope will be aligned to the x-axis
for c in range(enNum):
newSpline.bezier_points.add()
newPoint = newSpline.bezier_points[-1]
ratioVal = (c+1)/(enNum)
zVal = scaleVal - scaleVal*(1-baseSize)*ratioVal
newPoint.co = Vector((scaleVal*pruneWidth*shapeRatio(8,ratioVal,pruneWidthPeak,prunePowerHigh,prunePowerLow),0,zVal))
(newPoint.handle_right_type,newPoint.handle_left_type) = (enHandle,enHandle)
newSpline = enCu.splines.new('BEZIER')
newPoint = newSpline.bezier_points[-1]
newPoint.co = Vector((0,0,scaleVal))
(newPoint.handle_right_type,newPoint.handle_left_type) = (enHandle,enHandle)
# Create a second envelope but this time on the y-axis
for c in range(enNum):
newSpline.bezier_points.add()
newPoint = newSpline.bezier_points[-1]
ratioVal = (c+1)/(enNum)
zVal = scaleVal - scaleVal*(1-baseSize)*ratioVal
newPoint.co = Vector((0,scaleVal*pruneWidth*shapeRatio(8,ratioVal,pruneWidthPeak,prunePowerHigh,prunePowerLow),zVal))
(newPoint.handle_right_type,newPoint.handle_left_type) = (enHandle,enHandle)
leafVerts = []
leafFaces = []
levelCount = []
splineToBone = deque([''])
addsplinetobone = splineToBone.append
    # For each of the levels requested by the user, grow all the splines
for n in range(levels):
storeN = n
stemList = deque()
addstem = stemList.append
        # If n is used as an index into the tree parameters it must be at most 3, otherwise it would index past the end of the parameter arrays
n = min(3,n)
vertAtt = attractUp
splitError = 0.0
# If this is the first level of growth (the trunk) then we need some special work to begin the tree
if n == 0:
vertAtt = 0.0
newSpline = cu.splines.new('BEZIER')
cu.resolution_u = resU
newPoint = newSpline.bezier_points[-1]
newPoint.co = Vector((0,0,0))
newPoint.handle_right = Vector((0,0,1))
newPoint.handle_left = Vector((0,0,-1))
#(newPoint.handle_right_type,newPoint.handle_left_type) = ('VECTOR','VECTOR')
branchL = (scaleVal)*(length[0] + uniform(-lengthV[0],lengthV[0]))
childStems = branches[1]
startRad = branchL*ratio*(scale0 + uniform(-scaleV0,scaleV0))
endRad = startRad*(1 - taper[0])
newPoint.radius = startRad
addstem(stemSpline(newSpline,curve[0]/curveRes[0],curveV[0]/curveRes[0],0,curveRes[0],branchL/curveRes[0],childStems,startRad,endRad,0))
        # If this isn't the trunk then we may have multiple stems to initialise
else:
# Store the old rotation to allow new stems to be rotated away from the previous one.
oldRotate = 0
# For each of the points defined in the list of stem starting points we need to grow a stem.
for p in childP:
# Add a spline and set the coordinate of the first point.
newSpline = cu.splines.new('BEZIER')
cu.resolution_u = resU
newPoint = newSpline.bezier_points[-1]
newPoint.co = p.co
tempPos = zAxis.copy()
# If the -ve flag for downAngle is used we need a special formula to find it
if downAngleV[n] < 0.0:
downV = downAngleV[n]*(1 - 2*shapeRatio(0,(p.lengthPar - p.offset)/(p.lengthPar - baseSize*scaleVal)))
random()
# Otherwise just find a random value
else:
downV = uniform(-downAngleV[n],downAngleV[n])
downRotMat = Matrix.Rotation(downAngle[n]+downV,3,'X')
tempPos.rotate(downRotMat)
# If the -ve flag for rotate is used we need to find which side of the stem the last child point was and then grow in the opposite direction.
if rotate[n] < 0.0:
oldRotate = -copysign(rotate[n] + uniform(-rotateV[n],rotateV[n]),oldRotate)
# Otherwise just generate a random number in the specified range
else:
oldRotate += rotate[n]+uniform(-rotateV[n],rotateV[n])
# Rotate the direction of growth and set the new point coordinates
rotMat = Matrix.Rotation(oldRotate,3,'Z')
tempPos.rotate(rotMat)
tempPos.rotate(p.quat)
newPoint.handle_right = p.co + tempPos
# If this is the first level of branching then upward attraction has no effect and a special formula is used to find branch length and the number of child stems
if n == 1:
vertAtt = 0.0
lMax = length[1] + uniform(-lengthV[1],lengthV[1])
branchL = p.lengthPar*lMax*shapeRatio(shape,(p.lengthPar - p.offset)/(p.lengthPar - baseSize*scaleVal))
childStems = branches[2]*(0.2 + 0.8*(branchL/p.lengthPar)/lMax)
elif storeN <= levels - 2:
branchL = (length[n] + uniform(-lengthV[n],lengthV[n]))*(p.lengthPar - 0.6*p.offset)
childStems = branches[min(3,n+1)]*(1.0 - 0.5*p.offset/p.lengthPar)
# If this is the last level before leaves then we need to generate the child points differently
else:
branchL = (length[n] + uniform(-lengthV[n],lengthV[n]))*(p.lengthPar - 0.6*p.offset)
if leaves < 0:
childStems = False
else:
childStems = leaves*shapeRatio(leafDist,p.offset/p.lengthPar)
# Determine the starting and ending radii of the stem using the tapering of the stem
startRad = min(p.radiusPar[0]*((branchL/p.lengthPar)**ratioPower), p.radiusPar[1])
endRad = startRad*(1 - taper[n])
newPoint.radius = startRad
# If curveBack is used then the curviness of the stem is different for the first half
if curveBack[n] == 0:
curveVal = curve[n]/curveRes[n]
else:
curveVal = 2*curve[n]/curveRes[n]
# Add the new stem to list of stems to grow and define which bone it will be parented to
addstem(stemSpline(newSpline,curveVal,curveV[n]/curveRes[n],0,curveRes[n],branchL/curveRes[n],childStems,startRad,endRad,len(cu.splines)-1))
addsplinetobone(p.parBone)
childP = []
# Now grow each of the stems in the list of those to be extended
for st in stemList:
            # When using pruning, we need to ensure that the random effects will be the same for each iteration.
import abc
from functools import wraps
import numpy as np
from yt.config import ytcfg
from yt.data_objects.image_array import ImageArray
from yt.funcs import ensure_numpy_array, is_sequence, mylog
from yt.geometry.grid_geometry_handler import GridIndex
from yt.geometry.oct_geometry_handler import OctreeIndex
from yt.utilities.amr_kdtree.api import AMRKDTree
from yt.utilities.lib.bounding_volume_hierarchy import BVH
from yt.utilities.lib.misc_utilities import zlines, zpoints
from yt.utilities.lib.octree_raytracing import OctreeRayTracing
from yt.utilities.lib.partitioned_grid import PartitionedGrid
from yt.utilities.on_demand_imports import NotAModule
from yt.utilities.parallel_tools.parallel_analysis_interface import (
ParallelAnalysisInterface,
)
from yt.visualization.image_writer import apply_colormap
from .transfer_function_helper import TransferFunctionHelper
from .transfer_functions import (
ColorTransferFunction,
ProjectionTransferFunction,
TransferFunction,
)
from .utils import (
data_source_or_all,
get_corners,
new_interpolated_projection_sampler,
new_mesh_sampler,
new_projection_sampler,
new_volume_render_sampler,
)
from .zbuffer_array import ZBuffer
try:
from yt.utilities.lib.embree_mesh import mesh_traversal
# Catch ValueError in case size of objects in Cython change
except (ImportError, ValueError):
mesh_traversal = NotAModule("pyembree")
ytcfg["yt", "ray_tracing_engine"] = "yt"
try:
from yt.utilities.lib.embree_mesh import mesh_construction
# Catch ValueError in case size of objects in Cython change
except (ImportError, ValueError):
mesh_construction = NotAModule("pyembree")
ytcfg["yt", "ray_tracing_engine"] = "yt"
def invalidate_volume(f):
@wraps(f)
def wrapper(*args, **kwargs):
ret = f(*args, **kwargs)
obj = args[0]
if isinstance(obj._transfer_function, ProjectionTransferFunction):
obj.sampler_type = "projection"
obj._log_field = False
obj._use_ghost_zones = False
del obj.volume
obj._volume_valid = False
return ret
return wrapper
def validate_volume(f):
@wraps(f)
def wrapper(*args, **kwargs):
obj = args[0]
fields = [obj.field]
log_fields = [obj.log_field]
if obj.weight_field is not None:
fields.append(obj.weight_field)
log_fields.append(obj.log_field)
if not obj._volume_valid:
obj.volume.set_fields(
fields, log_fields, no_ghost=(not obj.use_ghost_zones)
)
obj._volume_valid = True
return f(*args, **kwargs)
return wrapper
class RenderSource(ParallelAnalysisInterface):
"""Base Class for Render Sources.
Will be inherited for volumes, streamlines, etc.
"""
volume_method = None
def __init__(self):
super().__init__()
self.opaque = False
self.zbuffer = None
@abc.abstractmethod
def render(self, camera, zbuffer=None):
pass
@abc.abstractmethod
def _validate(self):
pass
class OpaqueSource(RenderSource):
"""A base class for opaque render sources.
Will be inherited from for LineSources, BoxSources, etc.
"""
def __init__(self):
super().__init__()
self.opaque = True
def set_zbuffer(self, zbuffer):
self.zbuffer = zbuffer
def create_volume_source(data_source, field):
data_source = data_source_or_all(data_source)
ds = data_source.ds
index_class = ds.index.__class__
if issubclass(index_class, GridIndex):
return KDTreeVolumeSource(data_source, field)
elif issubclass(index_class, OctreeIndex):
return OctreeVolumeSource(data_source, field)
else:
raise NotImplementedError
class VolumeSource(RenderSource, abc.ABC):
"""A class for rendering data from a volumetric data source
Examples of such sources include a sphere, cylinder, or the
entire computational domain.
A :class:`VolumeSource` provides the framework to decompose an arbitrary
yt data source into bricks that can be traversed and volume rendered.
Parameters
----------
data_source: :class:`AMR3DData` or :class:`Dataset`, optional
This is the source to be rendered, which can be any arbitrary yt
data object or dataset.
field : string
The name of the field to be rendered.
Examples
--------
The easiest way to make a VolumeSource is to use the volume_render
function, so that the VolumeSource gets created automatically. This
example shows how to do this and then access the resulting source:
>>> import yt
>>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> im, sc = yt.volume_render(ds)
>>> volume_source = sc.get_source(0)
You can also create VolumeSource instances by hand and add them to Scenes.
This example manually creates a VolumeSource, adds it to a scene, sets the
camera, and renders an image.
>>> import yt
>>> from yt.visualization.volume_rendering.api import (
... Camera, Scene, create_volume_source)
>>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> sc = Scene()
>>> source = create_volume_source(ds.all_data(), "density")
>>> sc.add_source(source)
>>> sc.add_camera()
>>> im = sc.render()
"""
_image = None
data_source = None
volume_method = None
def __init__(self, data_source, field):
r"""Initialize a new volumetric source for rendering."""
super().__init__()
self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
self.current_image = None
self.check_nans = False
self.num_threads = 0
self.num_samples = 10
self.sampler_type = "volume-render"
self._volume_valid = False
# these are caches for properties, defined below
self._volume = None
self._transfer_function = None
self._field = field
self._log_field = self.data_source.ds.field_info[field].take_log
self._use_ghost_zones = False
self._weight_field = None
        self.tfh = TransferFunctionHelper(self.data_source.ds)
self.tfh.set_field(self.field)
@property
def transfer_function(self):
"""The transfer function associated with this VolumeSource"""
if self._transfer_function is not None:
return self._transfer_function
if self.tfh.tf is not None:
self._transfer_function = self.tfh.tf
return self._transfer_function
mylog.info("Creating transfer function")
self.tfh.set_field(self.field)
self.tfh.set_log(self.log_field)
self.tfh.build_transfer_function()
self.tfh.setup_default()
self._transfer_function = self.tfh.tf
return self._transfer_function
@transfer_function.setter
def transfer_function(self, value):
self.tfh.tf = None
valid_types = (
TransferFunction,
ColorTransferFunction,
ProjectionTransferFunction,
type(None),
)
if not isinstance(value, valid_types):
raise RuntimeError(
"transfer_function not a valid type, "
"received object of type %s" % type(value)
)
if isinstance(value, ProjectionTransferFunction):
self.sampler_type = "projection"
if self._volume is not None:
fields = [self.field]
if self.weight_field is not None:
fields.append(self.weight_field)
self._volume_valid = False
self._transfer_function = value
@property
def volume(self):
"""The abstract volume associated with this VolumeSource
This object does the heavy lifting to access data in an efficient manner
using a KDTree
"""
return self._get_volume()
@volume.setter
def volume(self, value):
assert isinstance(value, AMRKDTree)
del self._volume
self._field = value.fields
self._log_field = value.log_fields
self._volume = value
assert self._volume_valid
@volume.deleter
def volume(self):
del self._volume
self._volume = None
@property
def field(self):
"""The field to be rendered"""
return self._field
@field.setter
@invalidate_volume
def field(self, value):
field = self.data_source._determine_fields(value)
        if len(field) > 1:
            raise RuntimeError(
                "VolumeSource.field can only be a single field but received "
                "multiple fields: %s" % field
            )
field = field[0]
if self._field != field:
log_field = self.data_source.ds.field_info[field].take_log
self.tfh.bounds = None
else:
log_field = self._log_field
self._log_field = log_field
self._field = value
self.transfer_function = None
self.tfh.set_field(value)
self.tfh.set_log(log_field)
@property
def log_field(self):
"""Whether or not the field rendering is computed in log space"""
return self._log_field
@log_field.setter
@invalidate_volume
def log_field(self, value):
self.transfer_function = None
self.tfh.set_log(value)
self._log_field = value
@property
def use_ghost_zones(self):
"""Whether or not ghost zones are used to estimate vertex-centered data
values at grid boundaries"""
return self._use_ghost_zones
@use_ghost_zones.setter
@invalidate_volume
def use_ghost_zones(self, value):
self._use_ghost_zones = value
@property
def weight_field(self):
"""The weight field for the rendering
Currently this is only used for off-axis projections.
"""
return self._weight_field
@weight_field.setter
@invalidate_volume
def weight_field(self, value):
self._weight_field = value
def set_transfer_function(self, transfer_function):
"""Set transfer function for this source"""
self.transfer_function = transfer_function
return self
def _validate(self):
"""Make sure that all dependencies have been met"""
if self.data_source is None:
raise RuntimeError("Data source not initialized")
def set_volume(self, volume):
"""Associates an AMRKDTree with the VolumeSource"""
self.volume = volume
return self
def set_field(self, field):
"""Set the source's field to render
Parameters
----------
field: field name
The field to render
"""
self.field = field
return self
def set_log(self, log_field):
"""Set whether the rendering of the source's field is done in log space
Generally volume renderings of data whose values span a large dynamic
        range should be done in log space and volume renderings of data with
small dynamic range should be done in linear space.
Parameters
----------
log_field: boolean
If True, the volume rendering will be done in log space, and if False
will be done in linear space.
"""
self.log_field = log_field
return self
def set_weight_field(self, weight_field):
"""Set the source's weight field
.. note::
This is currently only used for renderings using the
ProjectionTransferFunction
Parameters
----------
weight_field: field name
The weight field to use in the rendering
"""
self.weight_field = weight_field
return self
def set_use_ghost_zones(self, use_ghost_zones):
"""Set whether or not interpolation at grid edges uses ghost zones
Parameters
----------
use_ghost_zones: boolean
If True, the AMRKDTree estimates vertex centered data using ghost
zones, which can eliminate seams in the resulting volume rendering.
Defaults to False for performance reasons.
"""
self.use_ghost_zones = use_ghost_zones
return self
def set_sampler(self, camera, interpolated=True):
"""Sets a volume render sampler
The type of sampler is determined based on the ``sampler_type`` attribute
of the VolumeSource. Currently the ``volume_render`` and ``projection``
sampler types are supported.
The 'interpolated' argument is only meaningful for projections. If True,
the data is first interpolated to the cell vertices, and then
tri-linearly interpolated to the ray sampling positions. If False, then
the cell-centered data is simply accumulated along the
ray. Interpolation is always performed for volume renderings.
"""
if self.sampler_type == "volume-render":
sampler = new_volume_render_sampler(camera, self)
elif self.sampler_type == "projection" and interpolated:
sampler = new_interpolated_projection_sampler(camera, self)
elif self.sampler_type == "projection":
sampler = new_projection_sampler(camera, self)
else:
            raise NotImplementedError(f"{self.sampler_type} not implemented yet")
self.sampler = sampler
assert self.sampler is not None
@abc.abstractmethod
def _get_volume(self):
"""The abstract volume associated with this VolumeSource
This object does the heavy lifting to access data in an efficient manner
using a KDTree
"""
pass
@abc.abstractmethod
@validate_volume
def render(self, camera, zbuffer=None):
"""Renders an image using the provided camera
Parameters
----------
camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
A volume rendering camera. Can be any type of camera.
        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance  # noqa: E501
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import seaborn as sns
def make_fisher_matrix(params_dict, fisher_params, hpeak=0.0, obs='GS',
sigma=None, sigma_mod_frac=0.,
k_min=None, k_max=None,
z_min=None, z_max=None,
axis_PS=None, cosmo_key='CDM',
add_sigma_poisson=False):
"""
Make Fisher matrix and its inverse from global signal or powerspectra
Parameters
----------
params_dict : dict
Dictionary of parameter objects
fisher_params : list
List of parameter strings to use for Fisher matrix (these strings must be the keys to params_dict)
hpeak : float
TODO
obs : str
'GS' - global signal, 'PS' - power spectrum
sigma : None,array
TODO
sigma_mod_frac : float
Fraction of modelling error in PS e.g. 0.2 adds a 20% error on the PS in quadrature to the 21cmsense error
k_min : None,float
Minimum k to use for PS [1/Mpc]
k_max : None,float
Maximum k to use for PS [1/Mpc]
z_min : None,float
Minimum redshift to use for PS
z_max : None,float
Maximum redshift to use for PS
axis_PS : None,int
TODO
cosmo_key : None,str
TODO
add_sigma_poisson : bool
TODO
Return
-------
Fisher matrix, Finv matrix
"""
Fij_matrix = np.zeros((len(fisher_params), len(fisher_params)))
for i,p1 in enumerate(fisher_params):
if i == 0 and obs == 'PS':
k_where = np.arange(len(params_dict[p1].PS_err[0]['k']))
if k_min is not None and k_max is not None: # k range in 1/Mpc
k_where = np.where((params_dict[p1].PS_err[0]['k'] <= k_max) & (params_dict[p1].PS_err[0]['k'] >= k_min))[0]
z_where = np.arange(len(params_dict[p1].PS_z_HERA))
if z_min is not None and z_max is not None:
z_where = np.where((params_dict[p1].PS_z_HERA <= z_max) & (params_dict[p1].PS_z_HERA >= z_min))[0]
# Model error (e.g. 20%)
sigma_mod = sigma_mod_frac * params_dict[p1].PS_fid[z_where][:,k_where]
# if cosmo_key is None:
# cosmo_key = params_dict[p1].deriv_PS.keys()[0]
PS0 = params_dict[p1].deriv_PS[cosmo_key][z_where][:,k_where]
# Poisson error
if add_sigma_poisson:
sigma_poisson = params_dict[p1].PS_err_Poisson[z_where][:,k_where]
else:
sigma_poisson = 0.
# Fisher as a function of redshift or k?
if axis_PS is not None:
Fij_matrix = np.zeros((PS0.shape[axis_PS-1], len(fisher_params), len(fisher_params)))
for j,p2 in enumerate(fisher_params):
if obs == 'GS':
if i==0 and j==0:
print('GS shape:',params_dict[p1].deriv_GS[cosmo_key].shape)
Fij_matrix[i,j] = Fij(params_dict[p1].deriv_GS[cosmo_key],
params_dict[p2].deriv_GS[cosmo_key],
sigma_obs=1, sigma_mod=0.)
elif obs == 'PS':
if sigma is None:
sigma_PS = params_dict[p1].PS_sigma[z_where][:,k_where]
else:
sigma_PS = sigma
if i==0 and j==0:
print('PS shape:',params_dict[p1].deriv_PS[cosmo_key][z_where][:,k_where].shape)
if axis_PS is not None:
Fij_matrix[:,i,j] = Fij(params_dict[p1].deriv_PS[cosmo_key][z_where][:,k_where],
params_dict[p2].deriv_PS[cosmo_key][z_where][:,k_where],
sigma_obs=sigma_PS, sigma_mod=sigma_mod, sigma_poisson=sigma_poisson, axis=axis_PS)
else:
Fij_matrix[i,j] = Fij(params_dict[p1].deriv_PS[cosmo_key][z_where][:,k_where],
params_dict[p2].deriv_PS[cosmo_key][z_where][:,k_where],
sigma_obs=sigma_PS, sigma_mod=sigma_mod, sigma_poisson=sigma_poisson, axis=axis_PS)
Finv = np.linalg.inv(Fij_matrix)
return Fij_matrix, Finv
def Fij(dObs_dtheta_i, dObs_dtheta_j,
sigma_obs=1., sigma_mod=0., sigma_poisson=0., axis=None):
"""
Make fisher matrix elements
Parameters
----------
dObs_dtheta_i : array_like
derivative wrt theta_i
dObs_dtheta_j : array_like
derivative wrt theta_j
sigma_obs : array_like
measurement uncertainties in the observations
sigma_mod :
modelling uncertainty
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default, axis=None,
will sum all of the elements of the input array. If axis is negative
it counts from the last to the first axis.
Return
------
F_ij fisher matrix element : float
"""
sigma_sq = sigma_obs**2. + sigma_mod**2. + sigma_poisson**2.
return np.sum(dObs_dtheta_i * dObs_dtheta_j/sigma_sq, axis=axis)
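# Minimal numerical sketch of Fij: with unit observational errors and no
# modelling or Poisson terms, the Fisher element reduces to a dot product of
# the two derivative vectors (here 0.5*1 + 0.5*2 + 0.5*3 = 3.0).
def _example_Fij():
    dObs_di = np.array([1.0, 2.0, 3.0])
    dObs_dj = np.array([0.5, 0.5, 0.5])
    return Fij(dObs_di, dObs_dj, sigma_obs=1.0)  # 3.0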
def fisher_correlations(Fij_matrix, fisher_params, plot=True):
"""
Fisher correlation matrix
Parameters
----------
Fij_matrix : array_like
Fisher information matrix
fisher_params: list
ordered list of parameters in the fisher matrix
plot: bool
heatmap plot
Return
------
R_ij_fisher correlation matrix
"""
R_ij_fisher = np.zeros((len(fisher_params), len(fisher_params)))
for i,p1 in enumerate(fisher_params):
for j,p2 in enumerate(fisher_params):
R_ij_fisher[i,j] = Fij_matrix[i,j]/np.sqrt(Fij_matrix[i,i]*Fij_matrix[j,j])
if plot:
mask = np.triu(np.ones_like(R_ij_fisher, dtype=bool))
sns.heatmap(R_ij_fisher, mask=mask,
cmap='RdBu',
xticklabels=fisher_params,yticklabels=fisher_params,
square=True, linewidths=.5, cbar_kws={"shrink": 1, 'label':'Correlation $r_{ij}$'})
return R_ij_fisher
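# Minimal sketch: correlation matrix of a toy 2x2 Fisher matrix. plot=False
# avoids the seaborn heatmap so the example has no plotting side effects.
def _example_fisher_correlations():
    F = np.array([[4.0, 1.0],
                  [1.0, 9.0]])
    # off-diagonal r_ab = 1/sqrt(4*9) = 1/6; diagonal entries are 1 by construction
    return fisher_correlations(F, ['a', 'b'], plot=False)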
# TODO generalize
alpha_std = {1.:1.52, 2.:2.48, 3.:3.44}
def get_ellipse_params(i: int, j: int, cov: np.array):
"""
Extract ellipse parameters from covariance matrix.
Based on Coe 2009
Parameters
----------
i : int
index of parameter 1
j : int
index of parameter 2
cov : array_like
covariance matrix
Return
------
ellipse a, b, angle in degrees
"""
# equations 1-4 Coe 2009. returns in degrees
def length(cov, sign=1):
"""
Calculate length of the ellipse semi-major/semi-minor axes
Aka the eigenvalues of the covariance matrix
"""
return np.sqrt(0.5*(cov[i,i] + cov[j,j]) + sign*np.sqrt(0.25*(cov[i,i] - cov[j,j])**2. + cov[i,j]*cov[j,i]))
def angle_deg(cov):
"""
Calculate angle of ellipse in degrees (anti-clockwise from x axis)
Gets the quadrant right!
"""
return np.degrees(0.5*np.arctan2(2*cov[i,j],(cov[i,i] - cov[j,j])))
a = length(cov, sign=1)
b = length(cov, sign=-1)
t = angle_deg(cov)
return a, b, t
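# Minimal sketch: for a diagonal covariance the semi-axes are just the square
# roots of the variances and the rotation angle is zero.
def _example_ellipse_params():
    cov = np.array([[4.0, 0.0],
                    [0.0, 1.0]])
    return get_ellipse_params(0, 1, cov=cov)  # (2.0, 1.0, 0.0)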
def plot_ellipse(ax, par1, par2, parameters, fiducial, cov,
resize_lims=True, positive_definite=[],
N_std=[1.,2.,3.], plot_rescale = 4.,
kwargs=[{'ls': '-'}],
color='tab:blue',
default_kwargs={'lw':0}):
"""
Plot N-sigma ellipses, from Coe 2009.
Parameters
----------
ax : matpotlib axis
axis upon which the ellipses will be drawn
par1 : string
parameter 1 name
par2 : string
parameter 2 name
parameters : list
list of parameter names
fiducial : array_like(ndim,)
fiducial values of parameters
cov : array_like(ndim,ndim,)
covariance matrix
color : string
color to plot ellipse with
positive_definite : list of string
convenience input, parameter names passed in this list
will be cut off at 0 in plots.
Returns
-------
list of float : sigma_x, sigma_y, sigma_xy
"""
# Find parameters in list
params = parameters
pind = dict(zip(params, list(range(len(params)))))
i = pind[par1]
j = pind[par2]
sigma_x = np.sqrt(cov[i,i])
sigma_y = np.sqrt(cov[j,j])
sigma_xy = cov[i,j]
a, b, theta = get_ellipse_params(i, j, cov=cov)
# Plot for each N sigma
for nn, N in enumerate(N_std):
# use defaults and then override with other kwargs
kwargs_temp = default_kwargs.copy()
if len(kwargs) > 1:
kwargs_temp.update(kwargs[nn])
else:
kwargs_temp.update(kwargs[0])
kwargs_n = kwargs_temp
e = Ellipse(
xy=(fiducial[i], fiducial[j]),
width=a * 2 * alpha_std[N], height=b * 2 * alpha_std[N],
angle=theta, zorder=0, facecolor=color, **kwargs_n)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
# Rescale the axes a bit
if resize_lims:
if par1 in positive_definite:
            ax.set_xlim(max(0.0, fiducial[i] - plot_rescale * sigma_x),
                        fiducial[i] + plot_rescale * sigma_x)
else:
ax.set_xlim(fiducial[i] - plot_rescale * sigma_x,
fiducial[i] + plot_rescale * sigma_x)
if par2 in positive_definite:
ax.set_ylim(max(0.0, fiducial[j] - plot_rescale * sigma_y),
fiducial[j] + plot_rescale * sigma_y)
else:
ax.set_ylim(fiducial[j] - plot_rescale * sigma_y,
fiducial[j] + plot_rescale * sigma_y)
return sigma_x, sigma_y, sigma_xy
def title_double_ellipses(axes, labels,
chain=None,
med=None, sigma=None,
title_fontsize=18, title_pad=55,
vspace=0.,
color='k'
):
"""
    Plot titles with parameter constraints from two covariance matrices/chains
Parameters
----------
axes : matpotlib axess
axes upon which the titles will be added
labels : list(ndim,)
list of parameter names
chain : array_like(ndim,), optional
MCMC chain of parameters
med : array_like(ndim,), optional
list of median values
sigma : array_like(ndim,)
list of sigmas
color : string
color to plot ellipse with
Returns
-------
None
"""
if chain is not None:
l, med, u = np.percentile(chain, [16,50,84], axis=0)
q_m, q_p = med - l, u - med
for i in range(len(labels)):
if med[i] < 100:
fmt = "{{0:{0}}}".format('.2f').format
else:
fmt = "{{0:{0}}}".format('.0f').format
if chain is not None:
CI = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
CI = CI.format(fmt(med[i]), fmt(q_m[i]), fmt(q_p[i]))
else:
CI = r"${{{0}}} \pm {{{1}}}$"
CI = CI.format(fmt(med[i]), fmt(sigma[i]))
axes[i,i].set_title(f'{labels[i]}', fontsize=title_fontsize, pad=title_pad)
axes[i,i].annotate(f'{CI}',
xy=(0.5,1.05+vspace), ha='center',
xycoords='axes fraction', color=color)
return
def plot_triangle(params, fiducial, cov, fig=None, ax=None,
positive_definite=[],
labels=None,
resize_lims=True,
N_std=[1.,2.], plot_rescale = 4.,
ellipse_color='tab:blue',
ellipse_kwargs=[{},
{'alpha':0.5}],
title_fontsize=20,
xlabel_kwargs={'labelpad': 5, 'fontsize':18},
ylabel_kwargs={'labelpad': 5, 'fontsize':18},
fig_kwargs={'figsize': (8, 8)},
plot1D_kwargs={'c':'black', 'lw':1}):
"""
Make a triangle plot from a covariance matrix
Based on https://github.com/xzackli/fishchips-public/blob/master/fishchips/util.py
Parameters
----------
params : list of strings
List of parameter strings
fiducial : array
Numpy array consisting of where the centers of ellipses should be
cov : numpy array
Covariance matrix to plot
fig : optional, matplotlib figure
Pass this if you already have a figure
ax : array containing matplotlib axes
Pass this if you already have a set of matplotlib axes
positive_definite: list
List of parameter strings which are positive definite
resize_lims : bool
Resize ellipse limits to scale of the errors [default = True]
N_std : list
List of number of standard deviations to plot
labels : list
List of labels corresponding to each dimension of the covariance matrix
ellipse_kwargs : dict
Keyword arguments for passing to the 1-sigma Matplotlib Ellipse call. You
can change this to change the color of your ellipses, for example.
xlabel_kwargs : dict
Keyword arguments which are passed to `ax.set_xlabel()`. You can change the
color and font-size of the x-labels, for example. By default, it includes
a little bit of label padding.
ylabel_kwargs : dict
Keyword arguments which are passed to `ax.set_ylabel()`. You can change the
color and font-size of the y-labels, for example. By default, it includes
        a little bit of label padding.
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Specialized tasks for NPIV World-Wide Port Names (WWPNs)."""
from oslo_log import log as logging
from pypowervm import const as c
from pypowervm import exceptions as e
from pypowervm.i18n import _
from pypowervm import util as u
from pypowervm.utils import uuid
from pypowervm.wrappers import base_partition as bp
from pypowervm.wrappers import job as pvm_job
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
LOG = logging.getLogger(__name__)
_ANY_WWPN = '-1'
_FUSED_ANY_WWPN = '-1 -1'
_GET_NEXT_WWPNS = 'GetNextWWPNs'
def build_wwpn_pair(adapter, host_uuid, pair_count=1):
"""Builds a WWPN pair that can be used for a VirtualFCAdapter.
Note: The API will only generate up to 8 pairs at a time. Any more will
cause the API to raise an error.
:param adapter: The adapter to talk over the API.
:param host_uuid: The host system for the generation.
:param pair_count: (Optional, Default: 1) The number of WWPN pairs to
generate. Can not be more than 8 or else the API will
fail.
:return: Non-mutable WWPN Pairs (list)
"""
# Build up the job & invoke
resp = adapter.read(
pvm_ms.System.schema_type, root_id=host_uuid,
suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_GET_NEXT_WWPNS)
job_w = pvm_job.Job.wrap(resp)
job_p = [job_w.create_job_parameter('numberPairsRequested',
str(pair_count))]
job_w.run_job(host_uuid, job_parms=job_p)
# Get the job result, and parse the output.
job_result = job_w.get_job_results_as_dict()
return job_result['wwpnList'].split(',')
def find_vios_for_wwpn(vios_wraps, p_port_wwpn):
"""Will find the VIOS that has a PhysFCPort for the p_port_wwpn.
:param vios_wraps: A list or set of VIOS wrappers.
:param p_port_wwpn: The physical port's WWPN.
:return: The VIOS wrapper that contains a physical port with the WWPN.
If there is not one, then None will be returned.
:return: The port (which is a PhysFCPort wrapper) on the VIOS wrapper that
represents the physical port.
"""
# Sanitize our input
s_p_port_wwpn = u.sanitize_wwpn_for_api(p_port_wwpn)
for vios_w in vios_wraps:
for port in vios_w.pfc_ports:
# No need to sanitize the API WWPN, it comes from the API.
if u.sanitize_wwpn_for_api(port.wwpn) == s_p_port_wwpn:
return vios_w, port
return None, None
def find_vios_for_vfc_wwpns(vios_wraps, vfc_wwpns):
"""Will find the VIOS that is hosting the vfc_wwpns.
:param vios_wraps: A list or set of VIOS wrappers.
:param vfc_wwpns: The list or set of virtual fibre channel WWPNs.
:return: The VIOS wrapper that supports the vfc adapters. If there is not
one, then None will be returned.
:return: The VFCMapping on the VIOS that supports the client adapters.
"""
# Sanitize our input
vfc_wwpns = {u.sanitize_wwpn_for_api(x) for x in vfc_wwpns}
for vios_w in vios_wraps:
for vfc_map in vios_w.vfc_mappings:
# If the map has no client adapter...then move on
if not vfc_map.client_adapter:
continue
# Maps without backing ports are effectively stale. We shouldn't
# consider them.
if vfc_map.backing_port is None:
continue
# If the WWPNs match, return it
if vfc_wwpns == set(vfc_map.client_adapter.wwpns):
return vios_w, vfc_map
return None, None
def intersect_wwpns(wwpn_set1, wwpn_set2):
"""Will return the intersection of WWPNs between the two sets.
:param wwpn_set1: A list of WWPNs.
:param wwpn_set2: A list of WWPNs.
:return: The intersection of the WWPNs. Will maintain the WWPN format
of wwpn_set1, but the comparison done will be agnostic of
formats (ex. colons and/or upper/lower case).
"""
wwpn_set2 = [u.sanitize_wwpn_for_api(x) for x in wwpn_set2]
return [y for y in wwpn_set1 if u.sanitize_wwpn_for_api(y) in wwpn_set2]
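# Minimal sketch: the intersection is computed on sanitized WWPNs, so colons
# and case are ignored, but the returned entries keep the wwpn_set1 formatting.
# The WWPN values below are made up for illustration.
def _example_intersect_wwpns():
    set1 = ['C0:50:76:00:00:00:00:01', 'C0:50:76:00:00:00:00:02']
    set2 = ['c050760000000002']
    return intersect_wwpns(set1, set2)  # ['C0:50:76:00:00:00:00:02']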
def derive_base_npiv_map(vios_wraps, p_port_wwpns, v_port_count):
"""Builds a blank NPIV port mapping, without any known vFC WWPNs.
This method is functionally similar to the derive_npiv_map. However, the
derive_npiv_map method assumes knowledge of the Virtual Fibre Channel
mappings beforehand. This method will generate a similar map, but when
sent to the add_map method, that method will allow the API to generate the
globally unique WWPNs rather than pre-seeding them.
:param vios_wraps: A list of VIOS wrappers. Can be built using the
extended attribute group (xag) of VIO_FMAP.
:param p_port_wwpns: A list of the WWPNs (strings) that can be used to
map the ports to. These WWPNs should reside on
Physical FC Ports on the VIOS wrappers that were
passed in.
:param v_port_count: The number of virtual ports to create.
:return: A list of sets. The format will be similar to that of the
derive_npiv_map method. However, instead of a fused_vfc_port_wwpn
a marker will be used to indicate that the API should generate
the WWPN.
"""
# Double the count of the markers. Should result in -1 -1 as the WWPN.
v_port_markers = [_ANY_WWPN] * v_port_count * 2
return derive_npiv_map(vios_wraps, p_port_wwpns, v_port_markers)
def derive_npiv_map(vios_wraps, p_port_wwpns, v_port_wwpns, preserve=True):
"""This method will derive a NPIV map.
A NPIV map is the linkage between an NPIV virtual FC Port and the backing
physical port. Two v_port_wwpns get tied to an individual p_port_wwpn.
A list of the 'mappings' will be returned. One per pair of v_port_wwpns.
The mappings will first attempt to spread across the VIOSes. Within each
VIOS, the port with the most available free NPIV ports will be selected.
There are scenarios where ports on a single VIOS could be reused.
- 4 v_port_wwpns, all p_port_wwpns reside on single VIOS
- 8 v_port_wwpns, only two VIOSes
- Etc...
In these scenarios, the ports will be spread such that they're running
across all the physical ports (that were passed in) on a given VIOS.
In even rarer scenarios, the same physical port may be re-used if the
v_port_wwpn pairs exceed the total number of p_port_wwpns.
:param vios_wraps: A list of VIOS wrappers. Can be built using the
extended attribute group (xag) of VIO_FMAP.
:param p_port_wwpns: A list of the WWPNs (strings) that can be used to
map the ports to. These WWPNs should reside on
Physical FC Ports on the VIOS wrappers that were
passed in.
:param v_port_wwpns: A list of the virtual fibre channel port WWPNs. Must
be an even number of ports.
:param preserve: (Optional, Default=True) If True, existing mappings with
matching virtual fibre channel ports are preserved. Else
new mappings are generated.
:return: A list of tuples representing both new and preserved mappings.
The format will be:
[ (p_port_wwpn1, fused_vfc_port_wwpn1),
(p_port_wwpn2, fused_vfc_port_wwpn2),
etc... ]
A 'fused_vfc_port_wwpn' is simply taking two v_port_wwpns, sanitizing them
and then putting them into a single string separated by a space.
"""
# Fuse all the v_port_wwpns together.
fused_v_port_wwpns = _fuse_vfc_ports(v_port_wwpns)
# Up front sanitization of all the p_port_wwpns
p_port_wwpns = list(map(u.sanitize_wwpn_for_api, p_port_wwpns))
existing_maps = []
new_fused_wwpns = []
# Detect if any mappings already exist on the system.
for fused_v_wwpn in fused_v_port_wwpns:
# If the mapping already exists, then add it to the existing maps.
vfc_map = has_client_wwpns(vios_wraps, fused_v_wwpn.split(" "))[1]
# Preserve an existing mapping if preserve=True. Otherwise, the
# backing_port may not be set and this is not an error condition if
# the vfc mapping is getting rebuilt.
if vfc_map is not None and preserve:
# Maps without backing ports are effectively stale. We shouldn't
# need to preserve them.
if vfc_map.backing_port is not None:
mapping = (vfc_map.backing_port.wwpn, fused_v_wwpn)
existing_maps.append(mapping)
else:
new_fused_wwpns.append(fused_v_wwpn)
LOG.debug("Add new map for client wwpns %s. Existing map=%s, "
"preserve=%s", fused_v_wwpn, vfc_map, preserve)
return _derive_npiv_map(
vios_wraps, new_fused_wwpns, p_port_wwpns, existing_maps)
def _derive_npiv_map(vios_wraps, new_fused_wwpns, p_port_wwpns,
existing_maps):
# Determine how many mappings are needed.
needed_maps = len(new_fused_wwpns)
newly_built_maps = []
next_vio_pos = 0
fuse_map_pos = 0
loops_since_last_add = 0
# This loop will continue through each VIOS (first set of load balancing
# should be done by VIOS) and if there are ports on that VIOS, will add
# them to the mapping.
#
# There is a rate limiter here though. If none of the VIOSes are servicing
# the request, then this has potential to be infinite loop. The rate
    # limiter detects when a full pass over the VIOSes adds no new mappings.
UrlMapDefaultRouteActionRetryPolicyPerTryTimeout.to_proto(
resource.per_try_timeout
)
)
else:
res.ClearField("per_try_timeout")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionRetryPolicy(
retry_condition=Primitive.from_proto(resource.retry_condition),
num_retries=Primitive.from_proto(resource.num_retries),
per_try_timeout=UrlMapDefaultRouteActionRetryPolicyPerTryTimeout.from_proto(
resource.per_try_timeout
),
)
class UrlMapDefaultRouteActionRetryPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapDefaultRouteActionRetryPolicy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapDefaultRouteActionRetryPolicy.from_proto(i) for i in resources]
class UrlMapDefaultRouteActionRetryPolicyPerTryTimeout(object):
def __init__(self, seconds: int = None, nanos: int = None):
self.seconds = seconds
self.nanos = nanos
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionRetryPolicyPerTryTimeout()
if Primitive.to_proto(resource.seconds):
res.seconds = Primitive.to_proto(resource.seconds)
if Primitive.to_proto(resource.nanos):
res.nanos = Primitive.to_proto(resource.nanos)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionRetryPolicyPerTryTimeout(
seconds=Primitive.from_proto(resource.seconds),
nanos=Primitive.from_proto(resource.nanos),
)
class UrlMapDefaultRouteActionRetryPolicyPerTryTimeoutArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionRetryPolicyPerTryTimeout.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionRetryPolicyPerTryTimeout.from_proto(i)
for i in resources
]
class UrlMapDefaultRouteActionRequestMirrorPolicy(object):
def __init__(self, backend_service: str = None):
self.backend_service = backend_service
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionRequestMirrorPolicy()
if Primitive.to_proto(resource.backend_service):
res.backend_service = Primitive.to_proto(resource.backend_service)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionRequestMirrorPolicy(
backend_service=Primitive.from_proto(resource.backend_service),
)
class UrlMapDefaultRouteActionRequestMirrorPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionRequestMirrorPolicy.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionRequestMirrorPolicy.from_proto(i) for i in resources
]
class UrlMapDefaultRouteActionCorsPolicy(object):
def __init__(
self,
allow_origin: list = None,
allow_origin_regex: list = None,
allow_method: list = None,
allow_header: list = None,
expose_header: list = None,
max_age: int = None,
allow_credentials: bool = None,
disabled: bool = None,
):
self.allow_origin = allow_origin
self.allow_origin_regex = allow_origin_regex
self.allow_method = allow_method
self.allow_header = allow_header
self.expose_header = expose_header
self.max_age = max_age
self.allow_credentials = allow_credentials
self.disabled = disabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionCorsPolicy()
if Primitive.to_proto(resource.allow_origin):
res.allow_origin.extend(Primitive.to_proto(resource.allow_origin))
if Primitive.to_proto(resource.allow_origin_regex):
res.allow_origin_regex.extend(
Primitive.to_proto(resource.allow_origin_regex)
)
if Primitive.to_proto(resource.allow_method):
res.allow_method.extend(Primitive.to_proto(resource.allow_method))
if Primitive.to_proto(resource.allow_header):
res.allow_header.extend(Primitive.to_proto(resource.allow_header))
if Primitive.to_proto(resource.expose_header):
res.expose_header.extend(Primitive.to_proto(resource.expose_header))
if Primitive.to_proto(resource.max_age):
res.max_age = Primitive.to_proto(resource.max_age)
if Primitive.to_proto(resource.allow_credentials):
res.allow_credentials = Primitive.to_proto(resource.allow_credentials)
if Primitive.to_proto(resource.disabled):
res.disabled = Primitive.to_proto(resource.disabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionCorsPolicy(
allow_origin=Primitive.from_proto(resource.allow_origin),
allow_origin_regex=Primitive.from_proto(resource.allow_origin_regex),
allow_method=Primitive.from_proto(resource.allow_method),
allow_header=Primitive.from_proto(resource.allow_header),
expose_header=Primitive.from_proto(resource.expose_header),
max_age=Primitive.from_proto(resource.max_age),
allow_credentials=Primitive.from_proto(resource.allow_credentials),
disabled=Primitive.from_proto(resource.disabled),
)
class UrlMapDefaultRouteActionCorsPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapDefaultRouteActionCorsPolicy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapDefaultRouteActionCorsPolicy.from_proto(i) for i in resources]
class UrlMapDefaultRouteActionFaultInjectionPolicy(object):
def __init__(self, delay: dict = None, abort: dict = None):
self.delay = delay
self.abort = abort
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicy()
if UrlMapDefaultRouteActionFaultInjectionPolicyDelay.to_proto(resource.delay):
res.delay.CopyFrom(
UrlMapDefaultRouteActionFaultInjectionPolicyDelay.to_proto(
resource.delay
)
)
else:
res.ClearField("delay")
if UrlMapDefaultRouteActionFaultInjectionPolicyAbort.to_proto(resource.abort):
res.abort.CopyFrom(
UrlMapDefaultRouteActionFaultInjectionPolicyAbort.to_proto(
resource.abort
)
)
else:
res.ClearField("abort")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionFaultInjectionPolicy(
delay=UrlMapDefaultRouteActionFaultInjectionPolicyDelay.from_proto(
resource.delay
),
abort=UrlMapDefaultRouteActionFaultInjectionPolicyAbort.from_proto(
resource.abort
),
)
class UrlMapDefaultRouteActionFaultInjectionPolicyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionFaultInjectionPolicy.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionFaultInjectionPolicy.from_proto(i)
for i in resources
]
class UrlMapDefaultRouteActionFaultInjectionPolicyDelay(object):
def __init__(self, fixed_delay: dict = None, percentage: float = None):
self.fixed_delay = fixed_delay
self.percentage = percentage
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelay()
if UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay.to_proto(
resource.fixed_delay
):
res.fixed_delay.CopyFrom(
UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay.to_proto(
resource.fixed_delay
)
)
else:
res.ClearField("fixed_delay")
if Primitive.to_proto(resource.percentage):
res.percentage = Primitive.to_proto(resource.percentage)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionFaultInjectionPolicyDelay(
fixed_delay=UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay.from_proto(
resource.fixed_delay
),
percentage=Primitive.from_proto(resource.percentage),
)
class UrlMapDefaultRouteActionFaultInjectionPolicyDelayArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionFaultInjectionPolicyDelay.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionFaultInjectionPolicyDelay.from_proto(i)
for i in resources
]
class UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(object):
def __init__(self, seconds: int = None, nanos: int = None):
self.seconds = seconds
self.nanos = nanos
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
url_map_pb2.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay()
)
if Primitive.to_proto(resource.seconds):
res.seconds = Primitive.to_proto(resource.seconds)
if Primitive.to_proto(resource.nanos):
res.nanos = Primitive.to_proto(resource.nanos)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(
seconds=Primitive.from_proto(resource.seconds),
nanos=Primitive.from_proto(resource.nanos),
)
class UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay.from_proto(i)
for i in resources
]
class UrlMapDefaultRouteActionFaultInjectionPolicyAbort(object):
def __init__(self, http_status: int = None, percentage: float = None):
self.http_status = http_status
self.percentage = percentage
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultRouteActionFaultInjectionPolicyAbort()
if Primitive.to_proto(resource.http_status):
res.http_status = Primitive.to_proto(resource.http_status)
if Primitive.to_proto(resource.percentage):
res.percentage = Primitive.to_proto(resource.percentage)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultRouteActionFaultInjectionPolicyAbort(
http_status=Primitive.from_proto(resource.http_status),
percentage=Primitive.from_proto(resource.percentage),
)
class UrlMapDefaultRouteActionFaultInjectionPolicyAbortArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
UrlMapDefaultRouteActionFaultInjectionPolicyAbort.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
UrlMapDefaultRouteActionFaultInjectionPolicyAbort.from_proto(i)
for i in resources
]
class UrlMapDefaultUrlRedirect(object):
def __init__(
self,
host_redirect: str = None,
path_redirect: str = None,
prefix_redirect: str = None,
redirect_response_code: str = None,
https_redirect: bool = None,
strip_query: bool = None,
):
self.host_redirect = host_redirect
self.path_redirect = path_redirect
self.prefix_redirect = prefix_redirect
self.redirect_response_code = redirect_response_code
self.https_redirect = https_redirect
self.strip_query = strip_query
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapDefaultUrlRedirect()
if Primitive.to_proto(resource.host_redirect):
res.host_redirect = Primitive.to_proto(resource.host_redirect)
if Primitive.to_proto(resource.path_redirect):
res.path_redirect = Primitive.to_proto(resource.path_redirect)
if Primitive.to_proto(resource.prefix_redirect):
res.prefix_redirect = Primitive.to_proto(resource.prefix_redirect)
if UrlMapDefaultUrlRedirectRedirectResponseCodeEnum.to_proto(
resource.redirect_response_code
):
res.redirect_response_code = UrlMapDefaultUrlRedirectRedirectResponseCodeEnum.to_proto(
resource.redirect_response_code
)
if Primitive.to_proto(resource.https_redirect):
res.https_redirect = Primitive.to_proto(resource.https_redirect)
if Primitive.to_proto(resource.strip_query):
res.strip_query = Primitive.to_proto(resource.strip_query)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapDefaultUrlRedirect(
host_redirect=Primitive.from_proto(resource.host_redirect),
path_redirect=Primitive.from_proto(resource.path_redirect),
prefix_redirect=Primitive.from_proto(resource.prefix_redirect),
redirect_response_code=UrlMapDefaultUrlRedirectRedirectResponseCodeEnum.from_proto(
resource.redirect_response_code
),
https_redirect=Primitive.from_proto(resource.https_redirect),
strip_query=Primitive.from_proto(resource.strip_query),
)
class UrlMapDefaultUrlRedirectArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapDefaultUrlRedirect.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapDefaultUrlRedirect.from_proto(i) for i in resources]
class UrlMapHostRule(object):
def __init__(
self, description: str = None, host: list = None, path_matcher: str = None
):
self.description = description
self.host = host
self.path_matcher = path_matcher
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapHostRule()
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if Primitive.to_proto(resource.host):
res.host.extend(Primitive.to_proto(resource.host))
if Primitive.to_proto(resource.path_matcher):
res.path_matcher = Primitive.to_proto(resource.path_matcher)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapHostRule(
description=Primitive.from_proto(resource.description),
host=Primitive.from_proto(resource.host),
path_matcher=Primitive.from_proto(resource.path_matcher),
)
class UrlMapHostRuleArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapHostRule.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapHostRule.from_proto(i) for i in resources]
class UrlMapPathMatcher(object):
def __init__(
self,
name: str = None,
description: str = None,
default_service: str = None,
default_route_action: dict = None,
default_url_redirect: dict = None,
path_rule: list = None,
route_rule: list = None,
header_action: dict = None,
):
self.name = name
self.description = description
self.default_service = default_service
self.default_route_action = default_route_action
self.default_url_redirect = default_url_redirect
self.path_rule = path_rule
self.route_rule = route_rule
self.header_action = header_action
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapPathMatcher()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if Primitive.to_proto(resource.default_service):
res.default_service = Primitive.to_proto(resource.default_service)
if UrlMapDefaultRouteAction.to_proto(resource.default_route_action):
res.default_route_action.CopyFrom(
UrlMapDefaultRouteAction.to_proto(resource.default_route_action)
)
else:
res.ClearField("default_route_action")
if UrlMapPathMatcherDefaultUrlRedirect.to_proto(resource.default_url_redirect):
res.default_url_redirect.CopyFrom(
UrlMapPathMatcherDefaultUrlRedirect.to_proto(
resource.default_url_redirect
)
)
else:
res.ClearField("default_url_redirect")
if UrlMapPathMatcherPathRuleArray.to_proto(resource.path_rule):
res.path_rule.extend(
UrlMapPathMatcherPathRuleArray.to_proto(resource.path_rule)
)
if UrlMapPathMatcherRouteRuleArray.to_proto(resource.route_rule):
res.route_rule.extend(
UrlMapPathMatcherRouteRuleArray.to_proto(resource.route_rule)
)
if UrlMapHeaderAction.to_proto(resource.header_action):
res.header_action.CopyFrom(
UrlMapHeaderAction.to_proto(resource.header_action)
)
else:
res.ClearField("header_action")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapPathMatcher(
name=Primitive.from_proto(resource.name),
description=Primitive.from_proto(resource.description),
default_service=Primitive.from_proto(resource.default_service),
default_route_action=UrlMapDefaultRouteAction.from_proto(
resource.default_route_action
),
default_url_redirect=UrlMapPathMatcherDefaultUrlRedirect.from_proto(
resource.default_url_redirect
),
path_rule=UrlMapPathMatcherPathRuleArray.from_proto(resource.path_rule),
route_rule=UrlMapPathMatcherRouteRuleArray.from_proto(resource.route_rule),
header_action=UrlMapHeaderAction.from_proto(resource.header_action),
)
class UrlMapPathMatcherArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapPathMatcher.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapPathMatcher.from_proto(i) for i in resources]
class UrlMapPathMatcherDefaultUrlRedirect(object):
def __init__(
self,
host_redirect: str = None,
path_redirect: str = None,
prefix_redirect: str = None,
redirect_response_code: str = None,
https_redirect: bool = None,
strip_query: bool = None,
):
self.host_redirect = host_redirect
self.path_redirect = path_redirect
self.prefix_redirect = prefix_redirect
self.redirect_response_code = redirect_response_code
self.https_redirect = https_redirect
self.strip_query = strip_query
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = url_map_pb2.ComputeBetaUrlMapPathMatcherDefaultUrlRedirect()
if Primitive.to_proto(resource.host_redirect):
res.host_redirect = Primitive.to_proto(resource.host_redirect)
if Primitive.to_proto(resource.path_redirect):
res.path_redirect = Primitive.to_proto(resource.path_redirect)
if Primitive.to_proto(resource.prefix_redirect):
res.prefix_redirect = Primitive.to_proto(resource.prefix_redirect)
if UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum.to_proto(
resource.redirect_response_code
):
res.redirect_response_code = UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum.to_proto(
resource.redirect_response_code
)
if Primitive.to_proto(resource.https_redirect):
res.https_redirect = Primitive.to_proto(resource.https_redirect)
if Primitive.to_proto(resource.strip_query):
res.strip_query = Primitive.to_proto(resource.strip_query)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return UrlMapPathMatcherDefaultUrlRedirect(
host_redirect=Primitive.from_proto(resource.host_redirect),
path_redirect=Primitive.from_proto(resource.path_redirect),
prefix_redirect=Primitive.from_proto(resource.prefix_redirect),
redirect_response_code=UrlMapPathMatcherDefaultUrlRedirectRedirectResponseCodeEnum.from_proto(
resource.redirect_response_code
),
https_redirect=Primitive.from_proto(resource.https_redirect),
strip_query=Primitive.from_proto(resource.strip_query),
)
class UrlMapPathMatcherDefaultUrlRedirectArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [UrlMapPathMatcherDefaultUrlRedirect.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [UrlMapPathMatcherDefaultUrlRedirect.from_proto(i) for i in resources]
class UrlMapPathMatcherPathRule(object):
def __init__(
self,
backend_service: str = None,
import numpy as np
import os
from numba import jit
fit_arg,blr,ns=False,0.1,'all'
log_path=os.path.join('./logs/avion/vol1/log_real_processed.csv')
# booleans that control several behaviors, see the comments below
fit_on_v=fit_arg #whether we fit on the predicted speed or on the acceleration
used_logged_v_in_model=not fit_arg
bo=False
# if the scipy optimizer is used, 'scipy' is passed as the argument
# otherwise the learning rate of the gradient descent is base_lr
base_lr=1.0 if blr=="scipy" else blr
fit_strategy="scipy" if blr=="scipy" else "custom_gradient"
# if wind_signal, the wind value at each time step
# is treated as an optimization parameter
# otherwise, the wind is treated as a constant
wind_signal=False
assume_nul_wind=False # if true, the constant wind signal is zero
nsecs=ns
# nsecs is the batch size in seconds
# n_epochs=20 #the number of epochs
# log_name is the name of the files that will be saved
# each file name is customized with the epoch number and the timestamp
# log_name="3_SEPTEMBRE_fit_v_%s_lr_%s_ns_%s"%(str(fit_on_v),str(base_lr) if blr!="scipy" else 'scipy',str(ns))
# BELOW: THE PARAMETERS SPECIFIC TO THE MODEL
# Below, we decide which parameters we want to identify
# id_mass=False
# id_wind=not assume_nul_wind
train_proportion=1.0 #proportion of train vs validation data
# log_path=os.path.join('./logs/avion/vol1/log_real_processed.csv')
# save_dir_name="results"
# Utility parameters
mass=8.5
g=np.array([0,0,9.81])
Aire_1,Aire_2,Aire_3,Aire_4,Aire_5 = 0.62*0.262* 1.292 * 0.5,\
0.62*0.262* 1.292 * 0.5, \
0.34*0.01* 1.292 * 0.5,\
0.34*0.1* 1.292 * 0.5, \
1.08*0.31* 1.292 * 0.5
Aire_list = [Aire_1,Aire_2,Aire_3,Aire_4,Aire_5]
cp_1,cp_2,cp_3,cp_4,cp_5 = np.array([-0.013,0.475,-0.040], dtype=float).flatten(), \
np.array([-0.013,-0.475,-0.040], dtype=float).flatten(), \
np.array([-1.006,0.85,-0.134], dtype=float).flatten(),\
np.array([-1.006,-0.85,-0.134], dtype=float).flatten(),\
np.array([0.021,0,-0.064], dtype=float).flatten()
cp_list=[cp_1,cp_2,cp_3,cp_4,cp_5]
Area=np.pi*(11.0e-02)**2
r0=11e-02
rho0=1.204
kv_motor=800.0
pwmmin=1075.0
pwmmax=1950.0
U_batt=16.8
vwi0=0.0
vwj0=0.0
vwk0=0.0
alpha_0=0.07
alpha_s = 0.3391428111
delta_s = 15.0*np.pi/180
cd0sa_0 = 0.9
cd0fp_0 = 0.010
cd1sa_0 = 2
cl1sa_0 = 5
cd1fp_0 = 2.5
coeff_drag_shift_0= 0.5
coeff_lift_shift_0= 0.05
coeff_lift_gain_0= 2.5
C_t = 1.1e-4
C_q = 1e-8
C_h = 1e-4
# alpha_0= 0* 0.07
# alpha_s = 0.3391428111
# delta_s = 15.0*np.pi/180
# cd0sa_0 = 0* 0.010
# cd0fp_0 = 0* 0.010
# cd1sa_0 = 0* 4.55
# cl1sa_0 = 0* 5
# cd1fp_0 = 0* 2.5
# coeff_drag_shift_0= 0*0.5
# coeff_lift_shift_0= 0*0.05
# coeff_lift_gain_0= 0*2.5
# C_t = 1.1e-4
# C_q = 0*1e-8
# C_h = 0*1e-4
physical_params=[mass,
Area,
r0,
rho0,
kv_motor,
pwmmin,
pwmmax,
U_batt,
cd0sa_0,
cd0fp_0,
cd1sa_0,
cl1sa_0 ,
cd1fp_0,
coeff_drag_shift_0,
coeff_lift_shift_0,
coeff_lift_gain_0,
vwi0,
vwj0]
# Bounds and scaling factors
bounds={}
bounds['m']=(0,np.inf)
bounds['A']=(0,np.inf)
bounds['r']=(0,np.inf)
bounds['cd0sa']=(0,np.inf)
bounds['cd0fp']=(-np.inf,np.inf)
bounds['cd1sa']=(-np.inf,np.inf)
bounds['cl1sa']=(-np.inf,np.inf)
bounds['cd1fp']=(-np.inf,np.inf)
bounds['coeff_drag_shift']=(0,np.inf)
bounds['coeff_lift_shift']=(0,np.inf)
bounds['coeff_lift_gain']=(0,np.inf)
bounds['vw_i']=(-15,15)
bounds['vw_j']=(-15,15)
"scaler corresponds roughly to the power of ten of the parameter"
"it does not have to though, it may be used to improve the grad descent"
# scalers={}
# for i in bounds:
# scalers[i]=1.0
# %% ####### Saving function
# import json
# import datetime
# #generating a new log name if none is provided
# if log_name=="":
# # log_name=str(datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
# # spath=os.path.join(os.getcwd(),save_dir_name,log_name)
# #remove any existing logs with the same name
# import shutil
# try:
# shutil.rmtree(spath)
# except:
# pass
# os.makedirs(spath)
# #save the initial parameters
# with open(os.path.join(spath,'data.json'), 'w') as fp:
# json.dump(metap, fp)
# import pandas as pd
# # the saver function will be used often
# def saver(name=None,save_path=os.path.join(os.getcwd(),"results_tests"),**kwargs):
# dname=str(int(time.time())) if (name is None) else name
# "it is simple: we take everything there is in **kwargs"
# "and put it into a dictionary that we save to a .json"
# D={}
# for i in kwargs:
# if type(kwargs[i])==dict:
# for j in kwargs[i]:
# D[j]=kwargs[i][j]
# else:
# D[i]=kwargs[i].tolist() if isinstance(kwargs[i],np.ndarray) else kwargs[i]
# with open(os.path.join(save_path,'%s.json'%(dname)), 'w') as fp:
# json.dump(D, fp)
# return 0
# %% ####### SYMPY PROBLEM
" the goal of this part is to generate a function that returns: "
" new_acc,new_v,sqerr_a,sqerr_v,Ja.T,Jv.T , that is "
" the predicted acc, the predicted speed, the squared error on acc and on v, "
" and the jacobians of the squared error on the acc and on the speed "
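# A minimal sketch of how this generated function is consumed (this mirrors
# pred_on_batch further below; n_id = len(id_variables)):
#   Y = model_func(*X)
#   acc_pred   = Y[:3]                    # predicted acceleration
#   speed_pred = Y[3:6]                   # predicted speed
#   sqerr_a, sqerr_v = Y[6], Y[7]         # squared errors on acc and speed
#   jac_a = Y[8:8+n_id]                   # gradient of sqerr_a w.r.t. id_variables
#   jac_v = Y[8+n_id:8+2*n_id]            # gradient of sqerr_v w.r.t. id_variables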
@jit(nogil=True)
def Rotation(R,angle):
c, s = np.cos(angle*np.pi/180), np.sin(angle*np.pi/180)
    r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]], dtype=float)
return R @ r
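# Note: Rotation(R, angle) right-multiplies R by a rotation of `angle` degrees
# about the body x axis; it is used below (R_list) to tilt the aerodynamic
# frames of surfaces 3 and 4.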
#BELOW: we specify which variables are the identification variables
import dill
model_func = dill.load(open('./.Funcs/model_func_'+str(used_logged_v_in_model)+"simple_"+str(bo),'rb'))[0]
function_moteur_physique= dill.load(open('./.Funcs/model_func_'+str(used_logged_v_in_model)+"simple_"+str(bo),'rb'))[1]
# BELOW: we specify which variables are the identification variables
import time
t7=time.time()
"cleansing memory"
# it is strongly recommended to delete the sympy variables to avoid issues
# in the rest of the functions
# del(dt,m,cost_scaler_a,cost_scaler_v,
# vlog_i,vlog_j,vlog_k,
# vpred_i,vpred_j,vpred_k,
# alog_i,alog_j,alog_k,
# vnext_i,vnext_j,vnext_k,
# vw_i,vw_j,
# kt,
# b1,
# c1,c2,c3,
# ch1,ch2,
# di,dj,dk,
# rho,A,r,r1,r2,r3,r4,r5,r6,r7,r8,r9,R,
# omega_1,omega_2,omega_3,omega_4,omega_5,omega_6,
# omega_c1,omega_c2,omega_c3,omega_c4,omega_c5,omega_c6)
"test "
# %% ####### IMPORT DATA
# at the end of this block, we obtain a list of datasets, corresponding
# to the batches.
print("LOADING DATA...")
import pandas as pd
raw_data=pd.read_csv(log_path)
print("PROCESSING DATA...")
prep_data=raw_data.drop(columns=[i for i in raw_data.keys() if (("forces" in i ) or ('pos' in i) or ("joy" in i)) ])
prep_data=prep_data.drop(columns=[i for i in raw_data.keys() if (("level" in i ) or ('Unnamed' in i) or ("index" in i)) ])
# tmin and tmax are chosen depending on which log is used
# if "vol12" in log_path:
# tmin,tmax=(-1,1e10)
# elif "vol1" in log_path:
# tmin,tmax=(41,265)
# elif "vol2" in log_path:
# tmin,tmax=(10,140)
# tmin=1
# tmax=500
# prep_data=prep_data[prep_data['t']>tmin]
# prep_data=prep_data[prep_data['t']<tmax]
prep_data=prep_data.reset_index()
for i in range(3):
prep_data['speed_pred[%i]'%(i)]=np.r_[prep_data['speed[%i]'%(i)].values[1:len(prep_data)],0]
prep_data['dt']=np.r_[prep_data['t'].values[1:]-prep_data['t'].values[:-1],0]
prep_data['t']-=prep_data['t'][0]
prep_data=prep_data.drop(index=[0,len(prep_data)-1])
prep_data=prep_data.reset_index()
data_prepared=prep_data[:len(prep_data)//50]
data_prepared=prep_data
def scale_to_01(df):
return (df-df.min())/(df.max()-df.min())
for i in range(6):
data_prepared['omega_c[%i]'%(i+1)]=scale_to_01(data_prepared['PWM_motor[%i]'%(i+1)])*925
"splitting the dataset into nsec"
data_batches=[data_prepared]
N_train_batches=1
N_val_batches=0
print("DATA PROCESS DONE")
# print("Importing model func...")
# %% ####### Identification Data Struct
# The variables are split between two dicts: id_variables and non_id_variables
# This step is tied, through the booleans defined above, to the first step
non_id_variables={"m":mass,
"A":Area,
"r":r0,
"rho":rho0,
'cd0sa':cd0sa_0,
'cd0fp':cd0fp_0,
'cd1sa':cd1sa_0,
'cl1sa':cl1sa_0,
'cd1fp':cd1fp_0,
'coeff_drag_shift':coeff_drag_shift_0,
'coeff_lift_shift':coeff_lift_shift_0,
'coeff_lift_gain':coeff_lift_gain_0,
"vw_i":vwi0,
"vw_j":vwj0,
"vw_k":vwk0,
"alpha_stall":alpha_s,
"largeur_stall":delta_s,
"Ct": C_t,
"Cq": C_q,
"Ch": C_h
}
id_variables={}
for key_ in ('cd0sa','cd0fp',
'cd1sa','cl1sa','cd1fp',
'coeff_drag_shift','coeff_lift_shift',
'coeff_lift_gain'):
id_variables[key_]=non_id_variables[key_]
if wind_signal:
id_variables['vw_i']=vwi0*np.zeros(len(data_prepared))
id_variables['vw_j']=vwj0*np.zeros(len(data_prepared))
"cleaning non_id_variables to avoid having variables in both dicts"
rem=[]
for i in non_id_variables.keys():
if i in id_variables.keys():
rem.append(i)
for j in rem:
del(non_id_variables[j])
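# With the default flags (wind_signal=False), id_variables thus holds the eight
# aerodynamic coefficients (cd0sa, cd0fp, cd1sa, cl1sa, cd1fp and the three
# shift/gain coefficients), while non_id_variables keeps the physical constants,
# the stall parameters, the rotor coefficients and the wind components.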
# for i in id_variables.keys():
# id_variables[i]=id_variables[i]/scalers[i]
# %% ####### MODEL function
# here we define the functions that will be called in the
# optimization part.
import transforms3d as tf3d
# import copy
@jit(nogil=True)
def arg_wrapping(batch,id_variables,data_index,speed_pred_previous):
    "this function builds, from the inputs, the argument that "
    "will be passed as input to the lambdified function from the sympy part "
    "batch is a dataframe"
    "id_variables are the identification variables"
    "scalers are the scaling coefficients"
    "data_index is an integer, used to fetch the right value "
    "from the data batch"
    "speed_pred_previous is the previous predicted speed"
    "omegas_pred is the previous predicted angular velocity, computed from kt"
i=data_index
# cost_scaler_v=1.0
# cost_scaler_a=1.0
dt=min(batch['dt'][i],1e-2)
vlog_i,vlog_j,vlog_k=batch['speed[0]'][i],batch['speed[1]'][i],batch['speed[2]'][i]
v_log = np.array([[vlog_i],
[vlog_j],
[vlog_k]])
vpred_i,vpred_j,vpred_k=speed_pred_previous
v_pred=np.array([[vpred_i],
[vpred_j],
[vpred_k]])
alog_i,alog_j,alog_k=batch['acc_ned_grad[0]'][i],batch['acc_ned_grad[1]'][i],batch['acc_ned_grad[2]'][i]
alog=np.array([[alog_i],
[alog_j],
[alog_k]])
# vnext_i,vnext_j,vnext_k=batch['speed[0]'][i],batch['speed[1]'][i],batch['speed[2]'][i]
m=non_id_variables['m'] if 'm' in non_id_variables else id_variables['m']
vw_i=non_id_variables['vw_i'] if 'vw_i' in non_id_variables else id_variables['vw_i']
vw_j=non_id_variables['vw_j'] if 'vw_j' in non_id_variables else id_variables['vw_j']
if wind_signal:
vw_i,vw_j=vw_i[i],vw_j[i]
v_W=np.array([[vw_i],
[vw_j],
[vwk0]])
Omega=np.zeros(3)
cd0sa=non_id_variables['cd0sa'] if 'cd0sa' in non_id_variables else id_variables['cd0sa']
cd0fp=non_id_variables['cd0fp'] if 'cd0fp' in non_id_variables else id_variables['cd0fp']
cd1sa=non_id_variables['cd1sa'] if 'cd1sa' in non_id_variables else id_variables['cd1sa']
cl1sa=non_id_variables['cl1sa'] if 'cl1sa' in non_id_variables else id_variables['cl1sa']
cd1fp=non_id_variables['cd1fp'] if 'cd1fp' in non_id_variables else id_variables['cd1fp']
coeff_drag_shift=non_id_variables['coeff_drag_shift'] if 'coeff_drag_shift' in non_id_variables else id_variables['coeff_drag_shift']
coeff_lift_shift=non_id_variables['coeff_lift_shift'] if 'coeff_lift_shift' in non_id_variables else id_variables['coeff_lift_shift']
coeff_lift_gain=non_id_variables['coeff_lift_gain'] if 'coeff_lift_gain' in non_id_variables else id_variables['coeff_lift_gain']
R=tf3d.quaternions.quat2mat(np.array([batch['q[%i]'%(j)][i] for j in range(4)]))
R_list =[R,R,Rotation(R,-45),Rotation(R,-135),R]
"reverse mixing"
pwm_null_angle=1527
RW_delta=batch['PWM_motor[1]'][i]-pwm_null_angle
LW_delta=batch['PWM_motor[2]'][i]-pwm_null_angle
RVT_delta=batch['PWM_motor[3]'][i]-pwm_null_angle
LVT_delta=batch['PWM_motor[4]'][i]-pwm_null_angle
delta0_list=np.array([RW_delta,-LW_delta,RVT_delta,-LVT_delta,0.0])
delta_pwm=500
delta0_list/=delta_pwm
delta0_list=delta0_list*15.0/180.0*np.pi
    ## Command between -15 and 15 for the first 4 terms; the last term is 0 (to keep the lengths consistent)
    omega_rotor = batch['omega_c[5]'][i] ## Propeller rotation speed (assumed the same for both motors)
alpha_list=[0,0,0,0,0]
    for p, cp in enumerate(cp_list):  # This loop computes the aero coefficients for each surface
VelinLDPlane = function_moteur_physique[0](Omega, cp, v_pred.flatten(), v_W.flatten(), R_list[p].flatten())
dragDirection = function_moteur_physique[1](Omega, cp, v_pred.flatten(), v_W.flatten(), R_list[p].flatten())
liftDirection = function_moteur_physique[2](Omega, cp, v_pred.flatten(), v_W.flatten(), R_list[p].flatten())
alpha_list[p] = -function_moteur_physique[3](dragDirection, liftDirection, np.array([[1],[0],[0]]).flatten(), VelinLDPlane)
X=(alog.flatten(),v_log.flatten(),dt, Aire_list, Omega.flatten(), R.flatten(), v_pred.flatten(), v_W.flatten(), cp_list, alpha_list, alpha_0, \
alpha_s, delta0_list.flatten(), delta_s, cl1sa, cd1fp, coeff_drag_shift, coeff_lift_shift, coeff_lift_gain,\
cd0fp, cd0sa, cd1sa, C_t, C_q, C_h, omega_rotor, g.flatten(), m)
return X
@jit(nogil=True)
def pred_on_batch(batch,id_variables):
    "if n is the batch size,"
    "this function calls the lambdified sympy function"
    " n times "
    "we obtain n predicted accs, n predicted speeds, n jacobians...."
    "batch is a dataframe"
    "id_variables are the identification variables"
    "scalers are the scaling coefficients"
acc_pred=np.zeros((len(batch),3))
speed_pred=np.zeros((len(batch),3))
square_error_a=np.zeros((len(batch),1))
square_error_v=np.zeros((len(batch),1))
jac_error_a=np.zeros((len(batch),len(id_variables)))
jac_error_v=np.zeros((len(batch),len(id_variables)))
for i in batch.index:
print("\r Pred on batch %i / %i "%(i,max(batch.index)), end='', flush=True)
speed_pred_prev=speed_pred[i-1] if i>min(batch.index) else (batch['speed[0]'][i],batch['speed[1]'][i],batch['speed[2]'][i])
X=arg_wrapping(batch,id_variables,i,speed_pred_prev)
Y=model_func(*X)
# print(Y)
acc_pred[i]=Y[:3].reshape(3,)
speed_pred[i]=Y[3:6].reshape(3,)
square_error_a[i]=Y[6:7].reshape(1,)
square_error_v[i]=Y[7:8].reshape(1,)
jac_error_a[i]=Y[8:8+len(id_variables)].reshape(len(id_variables),)
jac_error_v[i]=Y[8+len(id_variables):8+2*len(id_variables)].reshape(len(id_variables),)
return acc_pred,speed_pred,square_error_a,square_error_v,jac_error_a,jac_error_v
# %% Gradient
# import random
def X_to_dict(X,base_dict=id_variables):
    "transforms a vector into a dictionary "
out_dict={}
index_j=0
for i,key in enumerate(base_dict.keys()):
out_dict[key]=X[index_j:index_j+len(base_dict[key])] if isinstance(base_dict[key],np.ndarray) else X[index_j]
index_j+=len(base_dict[key]) if isinstance(base_dict[key],np.ndarray) else 1
return out_dict
def dict_to_X(input_dict):
    "transforms a dictionary into a vector "
    out=np.r_[tuple([np.array(input_dict[key]).flatten() for key in input_dict])]
    return out
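# Illustrative sanity check (not part of the original pipeline): dict_to_X and
# X_to_dict are inverses of each other, which the optimizer relies on when
# flattening the parameters for scipy or the custom gradient descent:
#   X0 = dict_to_X(id_variables)
#   d0 = X_to_dict(X0, base_dict=id_variables)
#   assert all(np.allclose(d0[key], id_variables[key]) for key in id_variables)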
# pandaatm/atmbody/slow_task_analyzer.py (from repo mightqxc/panda-atm)
import os
import time
import datetime
import copy
import json
from pandacommon.pandalogger import logger_utils
from pandaatm.atmconfig import atm_config
from pandaatm.atmcore import core_utils
from pandaatm.atmbody.agent_base import AgentBase
from pandaatm.atmutils.slow_task_analyzer_utils import get_job_durations, get_jobs_time_consumption_statistics, bad_job_test_main
base_logger = logger_utils.setup_logger('slow_task_analyzer')
# agent class
class SlowTaskAnalyzer(AgentBase):
def __init__(self):
super().__init__()
# parameters
self.sleepPeriod = 300
self.sinceHours = 336
self.taskDurationMaxHours = 168
self.taskSuccefulRunTimeMinPercent = 80
self.taskEachStatusMaxHours = 12
self.joblessIntervalMaxHours = 16
self.jobBadTimeMaxPercent = 10
self.jobMaxHoursMap = {
'finished': {'wait': 16, 'run': 96},
'failed': {'wait': 16, 'run': 16},
'cancelled': {'wait': 16, 'run': 16},
'closed': {'wait': 12, 'run': 16},
}
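        # e.g. with this map, a 'finished' job is flagged by _bad_job_qualify()
        # below if it waited more than 16 hours or ran more than 96 hours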
self.reportDir = '/tmp/slow_task_dumps'
def _slow_task_attempts_display(self, ret_dict: dict) -> str :
result_str_line_template = '{jediTaskID:>10} {attemptNr:>4} | {finalStatus:>10} {startTime:>20} {endTime:>20} {attemptDuration:>15} {successful_run_time_ratio:>6} '
result_str_list = []
result_str_list.append(result_str_line_template.format(jediTaskID='taskID', attemptNr='#N', finalStatus='status', startTime='startTime', endTime='endTime', attemptDuration='duration', successful_run_time_ratio='SRTR%'))
for k in sorted(ret_dict):
v = ret_dict[k]
result_str_line = result_str_line_template.format(
jediTaskID=k[0],
attemptNr=k[1],
finalStatus=v['finalStatus'],
startTime=v['startTime'].strftime('%y-%m-%d %H:%M:%S'),
endTime=v['endTime'].strftime('%y-%m-%d %H:%M:%S'),
attemptDuration=core_utils.timedelta_parse_dict(v['attemptDuration'])['str_dcolon'],
successful_run_time_ratio='{0:.2f}%'.format(v['jobs_time_consumption_stats_dict']['_successful_run_time_ratio']*100),
)
result_str_list.append(result_str_line)
return '\n'.join(result_str_list)
def _get_job_attr_dict(self, jobspec):
wait_duration, run_duration = get_job_durations(jobspec)
diag_display_str_list = []
if jobspec.transExitCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('trans-{0}'.format(jobspec.transExitCode))
if jobspec.pilotErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('pilot-{0}: {1}'.format(jobspec.pilotErrorCode, jobspec.pilotErrorDiag))
if jobspec.exeErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('exe-{0}: {1}'.format(jobspec.exeErrorCode, jobspec.exeErrorDiag))
if jobspec.ddmErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('ddm-{0}: {1}'.format(jobspec.ddmErrorCode, jobspec.ddmErrorDiag))
if jobspec.brokerageErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('brokr-{0}: {1}'.format(jobspec.brokerageErrorCode, jobspec.brokerageErrorDiag))
if jobspec.jobDispatcherErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('jdisp-{0}: {1}'.format(jobspec.jobDispatcherErrorCode, jobspec.jobDispatcherErrorDiag))
if jobspec.taskBufferErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('tbuf-{0}: {1}'.format(jobspec.taskBufferErrorCode, jobspec.taskBufferErrorDiag))
if jobspec.supErrorCode not in (None, 0, 'NULL', '0'):
diag_display_str_list.append('sup-{0}: {1}'.format(jobspec.supErrorCode, jobspec.supErrorDiag))
retDict = {
'PandaID': jobspec.PandaID,
'jobStatus': jobspec.jobStatus,
'priority': jobspec.currentPriority,
'waitDuration': wait_duration,
'runDuration': run_duration,
'errorInfo': '{0:>11} | {1:>24} | '.format(jobspec.jobStatus, jobspec.computingSite) + '; '.join(diag_display_str_list),
}
return retDict
def _get_task_status_log(self, status_list):
status_log_list = []
last_modification_time = None
for i in range(len(status_list)):
status, modificationTime = status_list[i]
if i >= 1:
last_duration = modificationTime - last_modification_time
status_log_list[i-1]['duration'] = last_duration
status_log_dict = {
'status': status,
'modificationTime': modificationTime,
'duration': datetime.timedelta(),
}
status_log_list.append(status_log_dict)
last_modification_time = modificationTime
return status_log_list
def _search_long_status(self, status_log_list):
long_status_log_list = []
for status_log_dict in status_log_list:
if status_log_dict['status'] not in ('scouting', 'running', 'processing') \
and status_log_dict['duration'] > datetime.timedelta(hours=self.taskEachStatusMaxHours):
long_status_log_list.append(status_log_dict)
return long_status_log_list
def _long_status_display(self, long_status_log_list) -> str:
result_str_line_template = ' {status:>11} | {modificationTime:>17} | {duration:>15}'
result_str_list = []
result_str_list.append(result_str_line_template.format(
status='status',
modificationTime='modificationTime',
duration='duration',
))
for status_log_dict in long_status_log_list:
result_str_list.append(result_str_line_template.format(
status=status_log_dict['status'],
modificationTime=status_log_dict['modificationTime'].strftime('%y-%m-%d %H:%M:%S'),
duration=core_utils.timedelta_parse_dict(status_log_dict['duration'])['str_dcolon'],
))
result_str = '\n'.join(result_str_list)
return result_str
def _search_bad_intervals(self, jobspec_list, attempt_start):
bad_interval_list = []
last_jobful_time = attempt_start
last_job_pandaid = None
for jobspec in sorted(jobspec_list, key=(lambda x: x.creationTime)):
if jobspec.endTime <= last_jobful_time:
continue
if jobspec.creationTime <= last_jobful_time:
# jobs duration overlap, good
pass
else:
# jobless interval
interval = jobspec.creationTime - last_jobful_time
if interval > datetime.timedelta(hours=self.joblessIntervalMaxHours):
bad_interval_dict = {
'duration': interval,
'lastJobPandaID': last_job_pandaid,
'lastJobEndTime': last_jobful_time,
'nextJobPandaID': jobspec.PandaID,
'nextJobCreationTime': jobspec.creationTime,
}
bad_interval_list.append(bad_interval_dict)
last_jobful_time = jobspec.endTime
last_job_pandaid = jobspec.PandaID
return bad_interval_list
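    # Illustrative example (hypothetical numbers): with joblessIntervalMaxHours=16,
    # if the last job of an attempt ended at 03-01 10:00 and the next job was only
    # created at 03-02 05:00, the 19-hour gap is recorded as one bad_interval_dict
    # (with the surrounding PandaIDs) and later rendered by _bad_intervals_display().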
def _bad_intervals_display(self, bad_interval_list) -> str:
result_str_line_template = ' {lastJobPandaID:>20} , {lastJobEndTime_str:>17} | {nextJobPandaID:>20} , {nextJobCreationTime_str:>17} | {duration_str:>15}'
result_str_list = []
result_str_list.append(result_str_line_template.format(
lastJobPandaID='PreviousJob PanDAID',
lastJobEndTime_str='Ended At',
nextJobPandaID='FollowingJob PanDAID',
nextJobCreationTime_str='Created At',
duration_str='Duration',
))
for gap in bad_interval_list:
result_str_list.append(result_str_line_template.format(
lastJobPandaID=gap['lastJobPandaID'],
lastJobEndTime_str=gap['lastJobEndTime'].strftime('%y-%m-%d %H:%M:%S'),
nextJobPandaID=gap['nextJobPandaID'],
nextJobCreationTime_str=gap['nextJobCreationTime'].strftime('%y-%m-%d %H:%M:%S'),
duration_str=core_utils.timedelta_parse_dict(gap['duration'])['str_dcolon'],
))
result_str = '\n'.join(result_str_list)
return result_str
def _bad_job_time_consumed_set(self, task_attempt_duration, jobs_time_consumption_stats_dict):
ret_msg_set = set()
for status in ['finished', 'failed', 'closed', 'cancelled']:
for dur_type in ['wait', 'run']:
if (status, dur_type) == ('finished', 'run'):
continue
if jobs_time_consumption_stats_dict[status][dur_type]*100/task_attempt_duration >= self.jobBadTimeMaxPercent:
msg_tag = 'Job{0}{1}Long'.format(status.capitalize(), dur_type.capitalize())
ret_msg_set.add(msg_tag)
return ret_msg_set
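    # e.g. if failed jobs' run time amounts to 12% of the attempt duration
    # (>= jobBadTimeMaxPercent of 10%), the tag 'JobFailedRunLong' is added;
    # the ('finished', 'run') combination is deliberately skipped above.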
def _bad_job_qualify(self, job_attr_dict):
retVal = False
status = job_attr_dict['jobStatus']
# according to status
for status in ['finished', 'failed', 'closed', 'cancelled']:
if status != job_attr_dict['jobStatus']:
continue
for dur_type in ['wait', 'run']:
dur_name = '{0}Duration'.format(dur_type)
if job_attr_dict[dur_name] > datetime.timedelta(hours=self.jobMaxHoursMap[status][dur_type]):
retVal = True
return retVal
def _bad_jobs_display(self, pandaid_list, err_info_dict) -> str:
sorted_err_info_list = sorted(err_info_dict.items(), key=(lambda x: (x[1]['n_jobs'], x[1]['waitDuration'] + x[1]['runDuration'])), reverse=True)
errors_str = '\n '.join([ '{n_jobs:>6} | {avg_wait:>12} | {avg_run:>12} | {avg_prio:>7} | {info}'.format(
n_jobs=x[1]['n_jobs'],
avg_wait=core_utils.timedelta_parse_dict(x[1]['waitDuration']/x[1]['n_jobs'])['str_dcolon'],
avg_run=core_utils.timedelta_parse_dict(x[1]['runDuration']/x[1]['n_jobs'])['str_dcolon'],
avg_prio=int(x[1]['priority']/x[1]['n_jobs']),
info=x[0]
) for x in sorted_err_info_list ])
result_str = (
' Jobs: {pandaids_str}\n'
' NJobs | AvgWaiting | AvgRunning | AvgPrio | jobStatus | computingSite | Dialogs \n'
' {errors_str}\n'
).format(
pandaids_str=','.join(sorted([ str(i) for i in pandaid_list ])),
errors_str=errors_str
)
return result_str
def _jobs_time_consumption_stats_display(self, task_attempt_duration, jobs_time_consumption_stats_dict) -> str:
# function to format one data record
def get_data_str(data_time):
data_str = '{duration:>13} ({percent:>2}%)'.format(
duration=core_utils.timedelta_parse_dict(data_time)['str_dcolon'],
percent=int(data_time*100/task_attempt_duration),
)
return data_str
# dict result dict
result_dict = {
'wfn': get_data_str(jobs_time_consumption_stats_dict['finished']['wait']),
'wfa': get_data_str(jobs_time_consumption_stats_dict['failed']['wait']),
'wcl': get_data_str(jobs_time_consumption_stats_dict['closed']['wait']),
'wcn': get_data_str(jobs_time_consumption_stats_dict['cancelled']['wait']),
'wto': get_data_str(jobs_time_consumption_stats_dict['total']['wait']),
'rfn': get_data_str(jobs_time_consumption_stats_dict['finished']['run']),
'rfa': get_data_str(jobs_time_consumption_stats_dict['failed']['run']),
'rcl': get_data_str(jobs_time_consumption_stats_dict['closed']['run']),
'rcn': get_data_str(jobs_time_consumption_stats_dict['cancelled']['run']),
'rto': get_data_str(jobs_time_consumption_stats_dict['total']['run']),
'tfn': get_data_str(jobs_time_consumption_stats_dict['finished']['total']),
'tfa': get_data_str(jobs_time_consumption_stats_dict['failed']['total']),
'tcl': get_data_str(jobs_time_consumption_stats_dict['closed']['total']),
'tcn': get_data_str(jobs_time_consumption_stats_dict['cancelled']['total']),
'tto': get_data_str(jobs_time_consumption_stats_dict['total']['total']),
}
# get result string
result_str = (
' | waiting | running | total | \n'
' finished | {wfn:>20} | {rfn:>20} | {tfn:>20} | \n'
' failed | {wfa:>20} | {rfa:>20} | {tfa:>20} | \n'
' closed | {wcl:>20} | {rcl:>20} | {tcl:>20} | \n'
' cancelled | {wcn:>20} | {rcn:>20} | {tcn:>20} | \n'
' total | {wto:>20} | {rto:>20} | {tto:>20} | \n'
'\n'
' {summary}\n'
).format(
summary='jobful time: {0:.2f}% , successful run time: {1:.2f}%'.format(
jobs_time_consumption_stats_dict['_jobful_time_ratio']*100,
jobs_time_consumption_stats_dict['_successful_run_time_ratio']*100),
**result_dict
)
return result_str
def run(self):
tmp_log = logger_utils.make_logger(base_logger, method_name='SlowTaskAnalyzer.run')
while True:
# start
tmp_log.info('start cycle')
# make report file
timeNow = datetime.datetime.utcnow()
report_file = os.path.join(self.reportDir, 'slow_tasks_{0}.txt'.format(timeNow.strftime('%y%m%d_%H%M%S')))
with open(report_file, 'w') as dump_file:
# dump opening information
dump_str = (
'Report created at {timestamp}\n\n'
'Parameters:\n'
'sinceHours = {sinceHours}\n'
'taskDurationMaxHours = {taskDurationMaxHours}\n'
'taskSuccefulRunTimeMinPercent = {taskSuccefulRunTimeMinPercent}\n'
'taskEachStatusMaxHours = {taskEachStatusMaxHours}\n'
'joblessIntervalMaxHours = {joblessIntervalMaxHours}\n'
'jobBadTimeMaxPercent = {jobBadTimeMaxPercent}\n'
'jobMaxHoursMap = {jobMaxHoursMap}\n'
'\n'
).format(
timestamp=timeNow.strftime('%y-%m-%d %H:%M:%S'),
sinceHours=self.sinceHours,
taskDurationMaxHours=self.taskDurationMaxHours,
taskSuccefulRunTimeMinPercent=self.taskSuccefulRunTimeMinPercent,
taskEachStatusMaxHours=self.taskEachStatusMaxHours,
joblessIntervalMaxHours=self.joblessIntervalMaxHours,
jobBadTimeMaxPercent=self.jobBadTimeMaxPercent,
jobMaxHoursMap=self.jobMaxHoursMap,
)
dump_file.write(dump_str)
# candidate slow task attempts
tmp_log.debug('fetching candidate slow task attempts created since {0} hours ago'.format(self.sinceHours))
created_since = datetime.datetime.utcnow() - datetime.timedelta(hours=self.sinceHours)
task_duration = datetime.timedelta(hours=self.taskDurationMaxHours)
cand_ret_dict = self.dbProxy.slowTaskAttemptsFilter01_ATM(created_since=created_since, prod_source_label=None, task_duration=task_duration)
# filter to get slow task attempts
tmp_log.debug('filtering slow task attempts')
ret_dict = {}
for k, v in cand_ret_dict.items():
jediTaskID, attemptNr = k
key_name = '{0}_{1:02}'.format(*k)
new_v = copy.deepcopy(v)
jobspec_list = self.dbProxy.slowTaskJobsInAttempt_ATM(jediTaskID=jediTaskID, attemptNr=attemptNr,
attempt_start=v['startTime'], attempt_end=v['endTime'])
# time consumption statistics of jobs
task_attempt_duration = v['attemptDuration']
jobs_time_consumption_stats_dict = get_jobs_time_consumption_statistics(jobspec_list)
jobful_time_ratio = jobs_time_consumption_stats_dict['total']['total'] / task_attempt_duration
successful_run_time_ratio = jobs_time_consumption_stats_dict['finished']['run'] / task_attempt_duration
jobs_time_consumption_stats_dict['_jobful_time_ratio'] = jobful_time_ratio
jobs_time_consumption_stats_dict['_successful_run_time_ratio'] = successful_run_time_ratio
# fill new value dictionary
new_v['jobspec_list'] = jobspec_list
new_v['jobs_time_consumption_stats_dict'] = jobs_time_consumption_stats_dict
# more criteria of slow task
if successful_run_time_ratio*100 < self.taskSuccefulRunTimeMinPercent:
# successful run time occupied too little percentage of task duration
ret_dict[k] = new_v
tmp_log.debug('got a slow task attempt {0}'.format(key_name))
n_slow_task_attempts = len(ret_dict)
dump_str = 'got {0} slow task attempts: \n{1}\n'.format(n_slow_task_attempts, self._slow_task_attempts_display(ret_dict))
dump_file.write(dump_str)
tmp_log.debug(dump_str)
# get culprits of slow task attempts
tmp_log.debug('fetching culprits')
                dump_str = '\n' + '='*64 + '\n' + 'Culprits of slowness:' + '\n\n'
dump_file.write(dump_str)
for k in sorted(ret_dict):
jediTaskID, attemptNr = k
dump_str = 'About jediTaskID={0} , attemptNr={1} \n\n'.format(jediTaskID, attemptNr)
dump_file.write(dump_str)
key_name = '{0}_{1:02}'.format(*k)
new_v = ret_dict[k]
slow_reason_set = set()
task_attempt_duration = new_v['attemptDuration']
jobspec_list = new_v['jobspec_list']
jobs_time_consumption_stats_dict = new_v['jobs_time_consumption_stats_dict']
# culprit task status (stuck long)
long_status_log_list = self._search_long_status(self._get_task_status_log(new_v['statusList']))
n_long_status = len(long_status_log_list)
bad_status_str = ','.join(sorted({ x['status'] for x in long_status_log_list }))
if n_long_status == 0:
tmp_log.debug('taskID_attempt={0} got 0 long status'.format(key_name))
else:
long_status_display_str = self._long_status_display(long_status_log_list)
dump_str = 'taskID_attempt={0} got {1} long status: \n{2}\n\n\n'.format(key_name, n_long_status, long_status_display_str)
dump_file.write(dump_str)
tmp_log.debug(dump_str)
slow_reason_set.add('TaskStatusLong')
# culprit intervals between jobs
bad_interval_list = self._search_bad_intervals(jobspec_list, new_v['startTime'])
n_bad_intervals = len(bad_interval_list)
if n_bad_intervals == 0:
tmp_log.debug('taskID_attempt={0} got 0 culprit intervals'.format(key_name))
else:
bad_intervals_display_str = self._bad_intervals_display(bad_interval_list)
slow_reason_set.add('JoblessIntervalLong')
dump_str = 'taskID_attempt={0} got {1} culprit intervals: \n{2}\n\n\n'.format(key_name, n_bad_intervals, bad_intervals_display_str)
dump_file.write(dump_str)
tmp_log.debug(dump_str)
# time consumption statistics of jobs
jobs_time_consumption_stats_display = self._jobs_time_consumption_stats_display(task_attempt_duration, jobs_time_consumption_stats_dict)
dump_str = 'taskID_attempt={0} time consumption stats of jobs: \n{1}\n'.format(key_name, jobs_time_consumption_stats_display)
dump_file.write(dump_str)
tmp_log.debug(dump_str)
# job symptom tags according to time consumption
job_slow_reason_set = self._bad_job_time_consumed_set(task_attempt_duration, jobs_time_consumption_stats_dict)
if not job_slow_reason_set:
tmp_log.debug('taskID_attempt={0} had no bad job symptom'.format(key_name))
else:
slow_reason_set |= job_slow_reason_set
dump_str = 'taskID_attempt={0} got bad job symptoms: {1}\n\n'.format(key_name, ','.join(sorted(job_slow_reason_set)))
dump_file.write(dump_str)
tmp_log.debug(dump_str)
# find some bad jobs as hint
pandaid_list = []
err_info_dict = {}
for jobspec in jobspec_list:
job_attr_dict = self._get_job_attr_dict(jobspec)
retVal = self._bad_job_qualify(job_attr_dict)
if retVal:
# qualified bad job
pandaid_list.append(jobspec.PandaID)
err_info = job_attr_dict['errorInfo']
if err_info in err_info_dict:
err_info_dict[err_info]['n_jobs'] += 1
err_info_dict[err_info]['waitDuration'] += job_attr_dict['waitDuration']
err_info_dict[err_info]['runDuration'] += job_attr_dict['runDuration']
err_info_dict[err_info]['priority'] += job_attr_dict['priority']
else:
err_info_dict[err_info] = {}
err_info_dict[err_info]['n_jobs'] = 1
err_info_dict[err_info]['waitDuration'] = job_attr_dict['waitDuration']
err_info_dict[err_info]['runDuration'] = job_attr_dict['runDuration']
err_info_dict[err_info]['priority'] = job_attr_dict['priority']
n_bad_jobs = len(pandaid_list)
if n_bad_jobs == 0:
tmp_log.debug('taskID_attempt={0} got 0 bad jobs'.format(key_name))
# -*- coding: utf-8 -*-
#
# Parse a RSD log file.
#
# This class is used by KanataConverter.
#
# There are several IDs in this script:
#
# iid: An unique id for each instruction in a RSD log file.
# This is outputted from SystemVerilog code.
# mid: The index of a micro op in an instruction.
# This is outputted from SystemVerilog code.
#
# gid: An unique id for each mico op in this script.
# This is calculated from iid and mid for internal use.
# cid: An unique id for each 'committed' micro op in a Kanata log.
# This is calculated from gid.
#
import re
#
# Global constants
#
RSD_PARSER_INITIAL_CYCLE = -1
RSD_PARSER_RETIREMENT_STAGE_ID = 14
RSD_PARSER_CID_DEFAULT = -1
class RSD_ParserError( Exception ):
    """ An exception class for RSD_Parser """
pass
class RSD_Parser( object ):
""" Parse RSD log data. """
# Constants
# RSD log command strings.
RSD_HEADER = "RSD_Kanata"
RSD_VERSION = 0
RSD_CMD_LABEL = "L"
RSD_CMD_STAGE = "S"
RSD_CMD_CYCLE = "C"
RSD_CMD_COMMENT = "#"
# Bit width of OpSerial in SystemVerilog code.
# OpSerial becomes iid in this python script.
OP_SERIAL_WIDTH = 10
# Max micro ops per an instruction.
MAX_MICRO_OPS_PER_INSN = 4
# Wrap around of gid caused by wrap around of iid.
GID_WRAP_AROUND = 2 ** OP_SERIAL_WIDTH * MAX_MICRO_OPS_PER_INSN
def CreateGID( self, iid, mid ):
""" Create unique 'gid' from 'iid' and 'mid'.
"""
if mid >= RSD_Parser.MAX_MICRO_OPS_PER_INSN:
raise RSD_ParserError( "'mid' exceeds MAX_MICRO_OPS_PER_INSN at iid(%s)" % iid )
gid = mid + iid * RSD_Parser.MAX_MICRO_OPS_PER_INSN
# Convert a gid wrapped around to a unique gid.
# - Search min( n : gid + n * GID_WRAP_AROUND > max-retired-gid - margin )
# - margin : GID_WRAP_AROUND / 2
# - Set gid = gid + n * GID_WRAP_AROUND
if len( self.retired ) != 0:
            numWrapAround = ( self.maxRetiredOp + RSD_Parser.GID_WRAP_AROUND // 2 - gid ) // RSD_Parser.GID_WRAP_AROUND
gid += RSD_Parser.GID_WRAP_AROUND * numWrapAround
return gid
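    # Worked example (hypothetical values): with OP_SERIAL_WIDTH = 10 and
    # MAX_MICRO_OPS_PER_INSN = 4, GID_WRAP_AROUND is 4096. An op with iid=5 and
    # mid=2 gets the raw gid 2 + 5*4 = 22. If iids have already wrapped around and
    # maxRetiredOp is, say, 5000, then numWrapAround = (5000 + 2048 - 22) // 4096 = 1,
    # so the unique gid becomes 22 + 4096 = 4118.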
# Event types
EVENT_INIT = 0
EVENT_STAGE_BEGIN = 1
EVENT_STAGE_END = 2
EVENT_STALL_BEGIN = 3
EVENT_STALL_END = 4
EVENT_RETIRE = 5
EVENT_FLUSH = 6
#
# Internal classes
#
class Label( object ):
""" Label information of an op """
def __init__( self ):
self.iid = 0
self.mid = 0
self.pc = 0
self.code = ""
def __repr__( self ):
return "{iid:%d, mid:%d, pc: %s, code: %s}" % ( self.iid, self.mid, self.pc, self.code )
class Op( object ):
""" Op class. """
def __init__( self, iid, mid, gid, stall, clear, stageID, updatedCycle ):
self.iid = iid
self.mid = mid
self.gid = gid
self.cid = RSD_PARSER_CID_DEFAULT
self.stageID = stageID
self.stall = stall
self.clear = clear
self.updatedCycle = updatedCycle
self.commit = False
self.label = RSD_Parser.Label()
def __repr__( self ):
return (
"{gid:%s, cid: %s, stall:%s, clear:%s, stageID:%s, updatedCycle: %s}" %
( self.gid, self.cid, self.stall, self.clear, self.stageID, self.updatedCycle )
)
class Event( object ):
""" Event class """
def __init__( self, gid, type, stageID, comment ):
self.gid = gid
self.type = type
self.stageID = stageID
self.comment = comment
def __repr__( self ):
return (
"{gid:%s, type:%s, stageID:%s, comment:%s}" %
( self.gid, self.type, self.stageID, self.comment )
)
def __init__( self ):
""" Initializer """
self.inputFileName = ""
self.inputFile = None
# A current processor cycle.
self.currentCycle = RSD_PARSER_INITIAL_CYCLE
self.ops = {} # gid -> Op map
self.events = {} # cycle -> Event map
self.retired = set( [] ) # retired gids
self.maxRetiredOp = 0; # The maximum number in retired ops.
self.committedOpNum = 0; # Num of committed ops.
def Open( self, inputFileName ):
self.inputFileName = inputFileName
self.inputFile = open( inputFileName, "r" );
def Close( self ):
if self.inputFile is not None :
self.inputFile.close()
#
# Parsing file
#
    def ProcessHeader( self, line ):
        """ Process a file header """
words = re.split( r"[\t\n\r]", line )
header = words[0]
if header != self.RSD_HEADER :
raise RSD_ParserError(
"An unknown file format."
)
# Check a file version
version = int( words[1] )
if version != self.RSD_VERSION :
raise RSD_ParserError(
"An unknown file version: %d" % ( version )
)
def ProcessLine( self, line ):
""" Process a line. """
words = re.split( r"[\t\n\r]", line )
cmd = words[0]
if cmd == self.RSD_CMD_STAGE :
self.OnRSD_Stage( words )
elif cmd == self.RSD_CMD_LABEL :
self.OnRSD_Label( words )
elif cmd == self.RSD_CMD_CYCLE :
self.OnRSD_Cycle( words )
elif cmd == self.RSD_CMD_COMMENT :
pass # A comment is not processed.
else:
raise RSD_ParserError( "Unknown command:'%s'" % (cmd) );
def OnRSD_Stage( self, words ):
""" Dump a stage state.
Format:
            'S', stage, valid, stall, clear, iid, mid, comment
"""
# Check whether an op on this stage is valid or not.
if words[2] == 'x':
valid = False
else:
valid = int( words[2] )
if( not valid ):
return
op = self.CreateOpFromString( words )
        # if both the stall and clear signals are asserted, it means a bubble is
        # being sent and it is not a pipeline flush.
flush = op.clear and not op.stall;
if (op.gid in self.retired):
if flush:
                # Ops in a backend may be flushed more than once, because there
                # are ops both in pipeline stages and in an active list.
return
else:
                print("A retired op is dumped. op: (%s)" % ( op.__repr__() ))
comment = words[7]
current = self.currentCycle
gid = op.gid
retire = op.stageID == RSD_PARSER_RETIREMENT_STAGE_ID
# Check whether an event occurs or not.
if gid in self.ops:
prevOp = self.ops[ gid ]
op.label = prevOp.label
# End stalling
if prevOp.stall and not op.stall:
self.AddEvent( current, gid, self.EVENT_STALL_END, prevOp.stageID, "" )
# End and begin a current stage For output comment.
self.AddEvent( current, gid, self.EVENT_STAGE_END, prevOp.stageID, "" )
self.AddEvent( current, gid, self.EVENT_STAGE_BEGIN, op.stageID, comment )
# Begin stalling
if not prevOp.stall and op.stall:
self.AddEvent( current, gid, self.EVENT_STALL_BEGIN, op.stageID, comment )
# End/Begin a stage
if prevOp.stageID != op.stageID:
self.AddEvent( current, gid, self.EVENT_STAGE_END, prevOp.stageID, "" )
self.AddEvent( current, gid, self.EVENT_STAGE_BEGIN, op.stageID, comment )
if retire:
self.AddRetiredGID( gid )
                # Count num of committed ops.
op.commit = True
op.cid = self.committedOpNum
self.committedOpNum += 1
# Close a last stage
self.AddEvent( current + 1, gid, self.EVENT_STAGE_END, op.stageID, "" )
self.AddEvent( current + 1, gid, self.EVENT_RETIRE, op.stageID, "" )
else:
            # Initialize/Begin a stage
self.AddEvent( current, gid, self.EVENT_INIT, op.stageID, "" )
self.AddEvent( current, gid, self.EVENT_STAGE_BEGIN, op.stageID, comment )
if ( op.stall ):
self.AddEvent( current, gid, self.EVENT_STALL_BEGIN, op.stageID, "" )
        # if both the stall and clear signals are asserted, it means a bubble is
        # being sent and it is not a pipeline flush.
if flush:
self.AddRetiredGID( gid )
prevOp = self.ops[ gid ]
if prevOp.stageID == 0:
# When an instruction was flushed in NextPCStage,
# delete the op from self.ops so that the instruction is not dumped
del self.ops[ gid ]
return
else:
# Add events about flush
self.AddEvent( current, gid, self.EVENT_STAGE_END, op.stageID, "" )
self.AddEvent( current, gid, self.EVENT_FLUSH, op.stageID, comment )
self.ops[ gid ] = op
def CreateOpFromString( self, words ):
""" Create an op from strings split from a source line text.
Format:
            'S', stage, valid, stall, clear, iid, mid
"""
stageID = int( words[1] )
stall = int( words[3] ) != 0
clear = int( words[4] ) != 0
iid = int( words[5] )
mid = int( words[6] )
gid = self.CreateGID( iid, mid )
return self.Op( iid, mid, gid, stall, clear, stageID, self.currentCycle )
def AddEvent( self, cycle, gid, type, stageID, comment ):
""" Add an event to an event list. """
event = self.Event( gid, type, stageID, comment )
if cycle not in self.events:
self.events[ cycle ] = []
self.events[ cycle ].append( event )
def OnRSD_Label( self, words ):
""" Dump information about an op
Format:
'L', iid, mid, pc, code
"""
iid = int( words[1] )
mid = int( words[2] )
pc = words[3]
code = words[4]
gid = self.CreateGID( iid, mid )
# Add label information to a label database.
ord(".")
a[2] = ord("?")
assert p[2] == b"?"
def test_from_buffer_more_cases():
try:
from _cffi_backend import _testbuff
except ImportError:
py.test.skip("not for pypy")
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
#
def check1(bufobj, expected):
c = from_buffer(BCharA, bufobj)
assert typeof(c) is BCharA
if sys.version_info >= (3,):
expected = [bytes(c, "ascii") for c in expected]
assert list(c) == list(expected)
#
def check(methods, expected, expected_for_memoryview=None):
if sys.version_info >= (3,):
if methods <= 7:
return
if expected_for_memoryview is not None:
expected = expected_for_memoryview
class X(object):
pass
_testbuff(X, methods)
bufobj = X()
check1(bufobj, expected)
try:
from __builtin__ import buffer
bufobjb = buffer(bufobj)
except (TypeError, ImportError):
pass
else:
check1(bufobjb, expected)
try:
bufobjm = memoryview(bufobj)
except (TypeError, NameError):
pass
else:
check1(bufobjm, expected_for_memoryview or expected)
#
check(1, "RDB")
check(2, "WRB")
check(4, "CHB")
check(8, "GTB")
check(16, "ROB")
#
check(1 | 2, "RDB")
check(1 | 4, "RDB")
check(2 | 4, "CHB")
check(1 | 8, "RDB", "GTB")
check(1 | 16, "RDB", "ROB")
check(2 | 8, "WRB", "GTB")
check(2 | 16, "WRB", "ROB")
check(4 | 8, "CHB", "GTB")
check(4 | 16, "CHB", "ROB")
def test_from_buffer_require_writable():
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
p1 = from_buffer(BCharA, b"foo", False)
assert p1 == from_buffer(BCharA, b"foo", False)
py.test.raises((TypeError, BufferError), from_buffer, BCharA, b"foo", True)
ba = bytearray(b"foo")
p1 = from_buffer(BCharA, ba, True)
p1[0] = b"g"
assert ba == b"goo"
def test_from_buffer_types():
BInt = new_primitive_type("int")
BIntP = new_pointer_type(BInt)
BIntA = new_array_type(BIntP, None)
lst = [-12345678, 87654321, 489148]
bytestring = bytearray(buffer(newp(BIntA, lst))[:] + b'XYZ')
lst2 = lst + [42, -999999999]
bytestring2 = bytearray(buffer(newp(BIntA, lst2))[:] + b'XYZ')
#
p1 = from_buffer(BIntA, bytestring) # int[]
assert typeof(p1) is BIntA
assert len(p1) == 3
assert p1[0] == lst[0]
assert p1[1] == lst[1]
assert p1[2] == lst[2]
with pytest.raises(IndexError):
p1[3]
with pytest.raises(IndexError):
p1[-1]
#
py.test.raises(TypeError, from_buffer, BInt, bytestring)
#
p2 = from_buffer(BIntP, bytestring) # int *
assert p2 == p1 or 'PY_DOT_PY' in globals()
# note: on py.py ^^^, bytearray buffers are not emulated well enough
assert typeof(p2) is BIntP
assert p2[0] == lst[0]
assert p2[1] == lst[1]
assert p2[2] == lst[2]
# hopefully does not crash, but doesn't raise an exception:
p2[3]
p2[-1]
# not enough data even for one, but this is not enforced:
from_buffer(BIntP, b"")
#
BIntA2 = new_array_type(BIntP, 2)
p2 = from_buffer(BIntA2, bytestring) # int[2]
assert typeof(p2) is BIntA2
assert len(p2) == 2
assert p2[0] == lst[0]
assert p2[1] == lst[1]
with pytest.raises(IndexError):
p2[2]
with pytest.raises(IndexError):
p2[-1]
assert p2 == p1 or 'PY_DOT_PY' in globals()
#
BIntA4 = new_array_type(BIntP, 4) # int[4]: too big
py.test.raises(ValueError, from_buffer, BIntA4, bytestring)
#
BStruct = new_struct_type("foo")
complete_struct_or_union(BStruct, [('a1', BInt, -1),
('a2', BInt, -1)])
BStructP = new_pointer_type(BStruct)
BStructA = new_array_type(BStructP, None)
p1 = from_buffer(BStructA, bytestring2) # struct[]
assert len(p1) == 2
assert typeof(p1) is BStructA
assert p1[0].a1 == lst2[0]
assert p1[0].a2 == lst2[1]
assert p1[1].a1 == lst2[2]
assert p1[1].a2 == lst2[3]
with pytest.raises(IndexError):
p1[2]
with pytest.raises(IndexError):
p1[-1]
assert repr(p1) == "<cdata 'foo[]' buffer len 2 from 'bytearray' object>"
#
p2 = from_buffer(BStructP, bytestring2) # 'struct *'
assert p2 == p1 or 'PY_DOT_PY' in globals()
assert typeof(p2) is BStructP
assert p2.a1 == lst2[0]
assert p2.a2 == lst2[1]
assert p2[0].a1 == lst2[0]
assert p2[0].a2 == lst2[1]
assert p2[1].a1 == lst2[2]
assert p2[1].a2 == lst2[3]
# does not crash:
p2[2]
p2[-1]
# not enough data even for one, but this is not enforced:
from_buffer(BStructP, b"")
from_buffer(BStructP, b"1234567")
#
release(p1)
assert repr(p1) == "<cdata 'foo[]' buffer RELEASED>"
#
BEmptyStruct = new_struct_type("empty")
complete_struct_or_union(BEmptyStruct, [], Ellipsis, 0)
assert sizeof(BEmptyStruct) == 0
BEmptyStructP = new_pointer_type(BEmptyStruct)
BEmptyStructA = new_array_type(BEmptyStructP, None)
py.test.raises(ZeroDivisionError, from_buffer, # empty[]
BEmptyStructA, bytestring)
#
BEmptyStructA5 = new_array_type(BEmptyStructP, 5)
p1 = from_buffer(BEmptyStructA5, bytestring) # struct empty[5]
assert typeof(p1) is BEmptyStructA5
assert len(p1) == 5
assert (cast(BIntP, p1) == from_buffer(BIntA, bytestring)
or 'PY_DOT_PY' in globals())
#
BVarStruct = new_struct_type("varfoo")
BVarStructP = new_pointer_type(BVarStruct)
complete_struct_or_union(BVarStruct, [('a1', BInt, -1),
('va', BIntA, -1)])
with pytest.raises(TypeError):
from_buffer(BVarStruct, bytestring)
pv = from_buffer(BVarStructP, bytestring) # varfoo *
assert pv.a1 == lst[0]
assert pv.va[0] == lst[1]
assert pv.va[1] == lst[2]
assert sizeof(pv[0]) == 1 * size_of_int()
with pytest.raises(TypeError):
len(pv.va)
# hopefully does not crash, but doesn't raise an exception:
pv.va[2]
pv.va[-1]
# not enough data even for one, but this is not enforced:
from_buffer(BVarStructP, b"")
assert repr(pv) == "<cdata 'varfoo *' buffer from 'bytearray' object>"
assert repr(pv[0]).startswith("<cdata 'varfoo &' ")
#
release(pv)
assert repr(pv) == "<cdata 'varfoo *' buffer RELEASED>"
assert repr(pv[0]).startswith("<cdata 'varfoo &' ")
#
pv = from_buffer(BVarStructP, bytestring) # make a fresh one
with pytest.raises(ValueError):
release(pv[0])
def test_issue483():
BInt = new_primitive_type("int")
BIntP = new_pointer_type(BInt)
BIntA = new_array_type(BIntP, None)
lst = list(range(25))
bytestring = bytearray(buffer(newp(BIntA, lst))[:] + b'XYZ')
p1 = from_buffer(BIntA, bytestring) # int[]
assert len(buffer(p1)) == 25 * size_of_int()
assert sizeof(p1) == 25 * size_of_int()
#
p2 = from_buffer(BIntP, bytestring)
assert sizeof(p2) == size_of_ptr()
assert len(buffer(p2)) == size_of_int() # first element only, by default
def test_memmove():
Short = new_primitive_type("short")
ShortA = new_array_type(new_pointer_type(Short), None)
Char = new_primitive_type("char")
CharA = new_array_type(new_pointer_type(Char), None)
p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678])
memmove(p, p + 1, 4)
assert list(p) == [-2345, -3456, -3456, -4567, -5678]
p[2] = 999
memmove(p + 2, p, 6)
assert list(p) == [-2345, -3456, -2345, -3456, 999]
memmove(p + 4, newp(CharA, b"\x71\x72"), 2)
if sys.byteorder == 'little':
assert list(p) == [-2345, -3456, -2345, -3456, 0x7271]
else:
assert list(p) == [-2345, -3456, -2345, -3456, 0x7172]
def test_memmove_buffer():
import array
Short = new_primitive_type("short")
ShortA = new_array_type(new_pointer_type(Short), None)
a = array.array('H', [10000, 20000, 30000])
p = newp(ShortA, 5)
memmove(p, a, 6)
assert list(p) == [10000, 20000, 30000, 0, 0]
memmove(p + 1, a, 6)
assert list(p) == [10000, 10000, 20000, 30000, 0]
b = array.array('h', [-1000, -2000, -3000])
memmove(b, a, 4)
assert b.tolist() == [10000, 20000, -3000]
assert a.tolist() == [10000, 20000, 30000]
p[0] = 999
p[1] = 998
p[2] = 997
p[3] = 996
p[4] = 995
memmove(b, p, 2)
assert b.tolist() == [999, 20000, -3000]
memmove(b, p + 2, 4)
assert b.tolist() == [997, 996, -3000]
p[2] = -p[2]
p[3] = -p[3]
memmove(b, p + 2, 6)
assert b.tolist() == [-997, -996, 995]
def test_memmove_readonly_readwrite():
SignedChar = new_primitive_type("signed char")
SignedCharA = new_array_type(new_pointer_type(SignedChar), None)
p = newp(SignedCharA, 5)
memmove(p, b"abcde", 3)
assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0]
memmove(p, bytearray(b"ABCDE"), 2)
assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0]
py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3)
ba = bytearray(b"xxxxx")
memmove(dest=ba, src=p, n=3)
assert ba == bytearray(b"ABcxx")
memmove(ba, b"EFGH", 4)
assert ba == bytearray(b"EFGHx")
def test_memmove_sign_check():
SignedChar = new_primitive_type("signed char")
SignedCharA = new_array_type(new_pointer_type(SignedChar), None)
p = newp(SignedCharA, 5)
py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault
def test_memmove_bad_cdata():
BInt = new_primitive_type("int")
p = cast(BInt, 42)
py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1)
py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1)
def test_dereference_null_ptr():
BInt = new_primitive_type("int")
BIntPtr = new_pointer_type(BInt)
p = cast(BIntPtr, 0)
with pytest.raises(RuntimeError):
p[0]
with pytest.raises(RuntimeError):
p[0] = 42
with pytest.raises(RuntimeError):
p[42]
with pytest.raises(RuntimeError):
p[42] = -1
def test_mixup():
BStruct1 = new_struct_type("foo")
BStruct2 = new_struct_type("foo") # <= same name as BStruct1
BStruct3 = new_struct_type("bar")
BStruct1Ptr = new_pointer_type(BStruct1)
BStruct2Ptr = new_pointer_type(BStruct2)
BStruct3Ptr = new_pointer_type(BStruct3)
BStruct1PtrPtr = new_pointer_type(BStruct1Ptr)
BStruct2PtrPtr = new_pointer_type(BStruct2Ptr)
BStruct3PtrPtr = new_pointer_type(BStruct3Ptr)
pp1 = newp(BStruct1PtrPtr)
pp2 = newp(BStruct2PtrPtr)
pp3 = newp(BStruct3PtrPtr)
pp1[0] = pp1[0]
with pytest.raises(TypeError) as e:
pp3[0] = pp1[0]
assert str(e.value).startswith("initializer for ctype 'bar *' must be a ")
assert str(e.value).endswith(", not cdata 'foo *'")
with pytest.raises(TypeError) as e:
pp2[0] = pp1[0]
assert str(e.value) == ("initializer for ctype 'foo *' appears indeed to "
"be 'foo *', but the types are different (check "
"that you are not e.g. mixing up different ffi "
"instances)")
def test_stdcall_function_type():
assert FFI_CDECL == FFI_DEFAULT_ABI
try:
stdcall = FFI_STDCALL
except NameError:
stdcall = FFI_DEFAULT_ABI
BInt = new_primitive_type("int")
BFunc = new_function_type((BInt, BInt), BInt, False, stdcall)
if stdcall != FFI_DEFAULT_ABI:
assert repr(BFunc) == "<ctype 'int(__stdcall *)(int, int)'>"
else:
assert repr(BFunc) == "<ctype 'int(*)(int, int)'>"
def test_get_common_types():
d = {}
_get_common_types(d)
assert d['bool'] == '_Bool'
def test_unpack():
BChar = new_primitive_type("char")
BArray = new_array_type(new_pointer_type(BChar), 10) # char[10]
p = newp(BArray, b"abc\x00def")
p0 = p
assert unpack(p, 10) ==
# -- docs/source/conf.py (from the wjsi/aliyun-odps-python-sdk repository) --
# -*- coding: utf-8 -*-
#
# PyOdps documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 18 09:47:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import atexit
import tempfile
import textwrap
import codecs
import re
from sphinx.directives import Include
dirname = os.path.dirname
docroot = os.path.dirname(os.path.abspath(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, dirname(dirname(dirname(os.path.abspath(__file__)))))
sys.path.append(os.path.abspath('../sphinx-ext/'))
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyODPS'
copyright = u'2014-2018, The Alibaba Group Holding Ltd.'
author = u'<NAME>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from odps import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
if 'gettext' not in sys.argv:
try:
import odps.internal
with_internal = True
exclude_patterns = ['*-ext.rst', '*-ext-*.rst']
except ImportError:
with_internal = False
exclude_patterns = ['*-int.rst', '*-int-*.rst']
else:
with_internal = None
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/PyODPS.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyOdpsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'xelatex'
latex_elements = {
'preamble': textwrap.dedent(r"""
\usepackage{svg}
\usepackage{hyperref}
\setcounter{tocdepth}{2}
""")
}
if on_rtd:
del latex_engine
latex_elements['preamble'] = textwrap.dedent(r"""
\hypersetup{unicode=true}
\usepackage{CJKutf8}
\usepackage{svg}
\usepackage{hyperref}
\setcounter{tocdepth}{2}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
\DeclareUnicodeCharacter{2203}{\ensuremath{\exists}}
\DeclareUnicodeCharacter{2200}{\ensuremath{\forall}}
\DeclareUnicodeCharacter{2286}{\ensuremath{\subseteq}}
\DeclareUnicodeCharacter{2713}{x}
\DeclareUnicodeCharacter{27FA}{\ensuremath{\Longleftrightarrow}}
\DeclareUnicodeCharacter{221A}{\ensuremath{\sqrt{}}}
\DeclareUnicodeCharacter{221B}{\ensuremath{\sqrt[3]{}}}
\DeclareUnicodeCharacter{2295}{\ensuremath{\oplus}}
\DeclareUnicodeCharacter{2297}{\ensuremath{\otimes}}
\begin{CJK}{UTF8}{gbsn}
\AtEndDocument{\end{CJK}}
""")
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyOdps.tex', u'PyODPS Documentation',
u'The Alibaba Group Holding Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyodps', u'PyODPS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyODPS', u'PyODPS Documentation',
author, 'PyODPS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
import re
from typing import Any, Callable, Dict, Iterator, List, Mapping, OrderedDict, Union
import asdf
from asdf import ValidationError
from asdf.schema import _type_to_tag
from asdf.tagged import TaggedDict
from weldx.asdf.extension import WxSyntaxError
from weldx.asdf.tags.weldx.time.datetimeindex import DatetimeIndexType
from weldx.asdf.tags.weldx.time.timedeltaindex import TimedeltaIndexType
from weldx.constants import WELDX_QUANTITY as Q_
from weldx.constants import WELDX_UNIT_REGISTRY as UREG
def _walk_validator(
instance: OrderedDict,
validator_dict: OrderedDict,
validator_function: Callable[[Mapping, Any, str], Iterator[ValidationError]],
position=None,
allow_missing_keys: bool = False,
) -> Iterator[ValidationError]:
"""Walk instance and validation dict entries in parallel and apply a validator func.
This function can be used to recursively walk both the instance dictionary and the
custom validation dictionary in parallel. Once a leaf dictionary entry is reached,
the validation function is applied to the selected items.
Parameters
----------
instance:
Tree serialization (with default dtypes) of the instance
validator_dict:
OrderedDict representation of the validation structure.
validator_function:
Custom python validator function to apply along the (nested) dictionary
position:
String representation of the current nested schema position
allow_missing_keys:
If True will skip validation if the requested key to validate does not exist.
Yields
------
asdf.ValidationError
"""
if position is None: # pragma: no cover
position = []
if isinstance(validator_dict, dict):
for key, item in validator_dict.items():
if isinstance(item, Mapping):
yield from _walk_validator(
instance[key],
validator_dict[key],
validator_function,
position=position + [key],
allow_missing_keys=allow_missing_keys,
)
else:
if key in instance:
yield from validator_function(instance[key], item, position + [key])
elif allow_missing_keys: # pragma: no cover
pass
else: # pragma: no cover
pass
# TODO: if a property is not required the key might be missing
# yield ValidationError(f"Missing key {key}")
else:
yield from validator_function(instance, validator_dict, position)
def _unit_validator(
instance: Mapping, expected_dimensionality: str, position: List[str]
) -> Iterator[ValidationError]:
"""Validate the 'unit' key of the instance against the given string.
Parameters
----------
instance:
Tree serialization with 'unit' key to validate.
expected_dimensionality:
String representation of the unit dimensionality to test against.
position:
Current position in nested structure for debugging
Yields
------
asdf.ValidationError
"""
if not position:
position = instance
unit = instance["unit"]
valid = Q_(unit).check(UREG.get_dimensionality(expected_dimensionality))
if not valid:
yield ValidationError(
f"Error validating unit dimension for property '{position}'. "
f"Expected unit of dimension '{expected_dimensionality}' "
f"but got unit '{unit}'"
)
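# Hedged usage sketch (added; the instance and validator dicts below are illustrative
# assumptions, not schema excerpts): _walk_validator dispatches each leaf entry of the
# validation dict to _unit_validator, which checks the "unit" key of the matching
# instance subtree.
#
# instance = {"length": {"value": 1.23, "unit": "meter"}}
# wx_unit = {"length": "[length]"}
# errors = list(
#     _walk_validator(instance, wx_unit, _unit_validator, allow_missing_keys=False)
# )
# # errors == [] when the dimensionality matches; otherwise it contains
# # asdf.ValidationError instances describing the mismatch.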
def _compare(_int, exp_string):
"""Compare helper of two strings for _custom_shape_validator.
An integer and an expected string are compared so that the string either contains
a ":" and thus describes an interval or a string consisting of numbers. So if our
integer is within the interval or equal to the described number, True is returned.
The interval can be open, in that there is no number left or right of the ":"
symbol.
Examples:
---------
_int = 5
exp_string = "5"
-> True
_int = 5
exp_string = ":"
-> True
Open interval:
_int = 5
exp_string = "3:"
-> True
_int = 5
exp_string = "4"
-> False
_int = 5
exp_string = "6:8"
-> False
Parameters
----------
_int:
Integer
exp_string:
String with the expected dimension
Returns
-------
bool
True or False
"""
if _int < 0:
raise WxSyntaxError("Negative dimension found")
if ":" in exp_string:
ranges = exp_string.split(":")
if ranges[0] == "":
ranges[0] = 0
elif ranges[0].isnumeric():
ranges[0] = int(ranges[0])
else:
raise WxSyntaxError(f"Non numeric character in range {exp_string}")
if ranges[1] == "":
ranges[1] = _int
elif ranges[1].isnumeric():
ranges[1] = int(ranges[1])
else:
raise WxSyntaxError(f"Non numeric character in range {exp_string}")
if ranges[0] > ranges[1]:
raise WxSyntaxError(f"The range should not be descending in {exp_string}")
return int(ranges[0]) <= _int <= int(ranges[1])
else:
return _int == int(exp_string)
def _prepare_list(_list, list_expected):
"""Prepare a List and an expected List for validation.
Preparation strips whitespace from all string entries and rewrites the "~" symbol
to ":". In addition, expected lists that begin with "..." or a parenthesised entry
are reversed (together with the instance list), so that validation can run from the
fixed end towards the open end; see the illustrative examples after this function.
parameters
----------
_list:
List with values
list_expected:
List with expected values
returns
-------
_list:
prepared List
list_expected:
prepared List with expected values
"""
# remove blank spaces in dict_test
_list = [x.replace(" ", "") if isinstance(x, str) else x for x in _list]
# accept "~" additionally as input of ":". And remove blank spaces.
list_expected = [
x.replace(" ", "").replace("~", ":") if isinstance(x, str) else x
for x in list_expected
]
# turn around the list if "..." or "(" are at the beginning.
# because the validation is made from begin -> end.
# like this we validate the array from end -> begin.
if "(" in str(list_expected[0]) or "..." in str(list_expected[0]):
list_expected = list(reversed(list_expected))
_list = list(reversed(_list))
return _list, list_expected
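# Illustrative examples (added, not from the original module):
#
# _prepare_list([2, 3], ["...", 3])   ->  ([3, 2], [3, "..."])   # both lists reversed
# _prepare_list([2, 3], ["1 ~ 3", 3]) ->  ([2, 3], ["1:3", 3])   # whitespace stripped, "~" -> ":"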
def _is_range_format_valid(format_string: str):
"""
Return 'True' if a string represents a valid range definition and 'False' otherwise.
Parameters
----------
format_string:
String that should be checked.
Returns
-------
bool:
'True' if the passed string is a valid range definition, 'False' otherwise
"""
if ":" in format_string:
if len(format_string.split(":")) != 2:
return False
format_string = format_string.replace(":", "")
return format_string.isalnum() or format_string == ""
return format_string.isalnum()
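# Quick illustration (added) of which range strings are accepted:
#
# _is_range_format_valid("3")      ->  True   (fixed dimension)
# _is_range_format_valid("1:5")    ->  True   (closed interval)
# _is_range_format_valid(":")      ->  True   (interval open on both sides)
# _is_range_format_valid("1:2:3")  ->  False  (more than one ":")
# _is_range_format_valid("1-5")    ->  False  ("-" is neither alphanumeric nor ":")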
def _validate_expected_list(list_expected):
"""Validate an expected List and raises exceptions.
The syntax of the expected list is validated.
Examples that will raise errors:
Variable length should be at beginning or end.
[1, 2, "...", 4, 5]
[1, 2, "(3)", 4, 5]
Additional arguments are not accepted
[1, 2, 3, 4, "5..."]
[1, 2, 3, 4, "5(5)"]
params
------
list_expected:
Expected List to validate against
raises
------
WxSyntaxError:
WxSyntaxError will be raised if a rule violation is found
"""
validator = 0
for exp in list_expected:
if validator == 1 and not ("(" in str(exp) or "..." in str(exp)):
raise WxSyntaxError(
"Optional dimensions in the expected "
"shape should only stand at the end/beginning."
)
if validator == 2:
raise WxSyntaxError('After "..." should not be another dimension.')
if "..." in str(exp):
if "..." != exp:
raise WxSyntaxError(
f'"..." should not have additional properties:' f" {exp} was found."
)
validator = 2
elif "(" in str(exp):
val = re.search(r"\((.*)\)", exp)
if (
val is None
or len(val.group(1)) + 2 != len(exp)
or not _is_range_format_valid(val.group(1))
):
raise WxSyntaxError(
f'Invalid optional dimension format. Correct format is "(_)", but '
f" {exp} was found."
)
validator = 1
elif not _is_range_format_valid(str(exp)):
raise WxSyntaxError(
f"{exp} is an invalid range format."
f"Consult the documentation for a list of all valid options"
)
def _compare_lists(_list, list_expected):
"""Compare two lists.
The two lists are interpreted as a list of dimensions. We compare the dimensions of
these lists and in the case of a mismatch, False is returned. If the dimensions
match and there are variables in the list_expected, they are entered into a
dictionary and this is output. The dictionary can be empty if there are no
variables in the list_expected.
Examples:
---------
_compare_lists([1, 2, 3], [1, 2, 3])
-> {}
_compare_lists([1, 2, 3], [1, n1, n2])
-> {n1: 2, n2: 3}
_compare_lists([1, 2, 3], [1, "..."])
-> {}
_compare_lists([1, 2, 3], [1, 2, 4])
-> False
params
------
_list:
List of Integer
list_expected:
List build by the rules in _custom_shape_validator
returns
-------
False:
when a dimension mismatch occurs
dict_values:
when no dimension mismatch occurs. Can be empty {}. Dictionary - keys: variable
names in the validation schemes. values: values of the validation schemes.
"""
dict_values = dict()
has_variable_dim_num = False
for i, exp in enumerate(list_expected):
if "..." in str(exp):
has_variable_dim_num = True
break # all the following dimensions are accepted
if "(" in str(exp):
if i < len(_list):
exp = re.search(r"\((.*)\)", exp).group(1)
else: # pragma: no cover
continue # TODO: actually covered, but not registered by codecov - bug?
# all alphanumeric strings are OK - only numeric strings are not
# eg: "n", "n1", "n1234", "myasdfstring1337"
if str(exp).isalnum() and not str(exp).isnumeric():
if exp not in dict_values:
dict_values[exp] = _list[i]
elif _list[i] != dict_values[exp]:
return False
elif i >= len(_list) or not _compare(_list[i], str(exp)):
return False
if (len(_list) > len(list_expected)) and not has_variable_dim_num:
return False
return dict_values
def _get_instance_shape(instance_dict: Union[TaggedDict, Dict[str, Any]]) -> List[int]:
"""Get the shape of an ASDF instance from its tagged dict form."""
#! /usr/bin/python
"""Code to combine different slabs into a single high resolution slab
# This code was developed by <NAME> at the ARAMIS lab.
# Maintainer: <NAME> (2019)
#
# For any use of this code, the following paper must be cited:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# Robust imaging of hippocampal inner structure at 7T: in vivo
# acquisition protocol and methodological choices.
"""
import os
import shutil
import argparse
import io
import contextlib
import gzip
import numpy as np
import nibabel as nib
import nilearn as nil
import nilearn.image
import nipype.interfaces.spm as spm
import nipype.interfaces.matlab as mlab
import check_spm
def read_cli_args():
"""Read command-line interface arguments
Parse the input to the command line with the argparse module.
Args:
N/A
Returns:
args (argparse.Namespace): parsed arguments
cli_usage (string): command-line interface usage message
"""
# read command line arguments
cli_description = 'Code to combine different slabs into a single high'
cli_description = '{0} resolution slab'.format(cli_description)
parser = argparse.ArgumentParser(description=cli_description)
# add arguments
#-- mandatory arguments
#---- first slab of first repetition
parser.add_argument(
'rep1s1_path',
metavar='rep1s1',
help='.nii(.gz) first slab of first repetition')
#---- second slab of first repetition
parser.add_argument(
'rep1s2_path',
metavar='rep1s2',
help='.nii(.gz) second slab of first repetition')
#---- first slab of first repetition
parser.add_argument(
'rep2s1_path',
metavar='rep2s1',
help='.nii(.gz) first slab of second repetition')
#---- second slab of first repetition
parser.add_argument(
'rep2s2_path',
metavar='rep2s2',
help='.nii(.gz) second slab of second repetition')
#---- low resolution volume
parser.add_argument(
'lowres_path',
metavar='lowres',
help='.nii(.gz) low resolution volume')
#---- output dir
parser.add_argument(
'outdir_path',
metavar='out_dir',
help='path where output files will be stored')
#-- optional arguments
parser.add_argument(
'-spm',
'--spm_path',
help='path to SPM folder (i.e., where spm.m is located)')
# parse all arguments
args = parser.parse_args()
# store usage message in string
cli_usage = None
with io.StringIO() as buf, contextlib.redirect_stdout(buf):
parser.print_usage()
cli_usage = buf.getvalue()
if cli_usage is None:
raise ValueError('the CLI usage variable must not be empty')
return args, cli_usage
def check_spm_available(args, cli_usage):
"""Check SPM can be found by Matlab
Make sure SPM can be found by Matlab and terminate the program if it
cannot.
Args:
args (argparse.Namespace): parsed arguments
cli_usage (string): command-line interface usage message
Returns:
spm_path (string): path to SPM folder. Will be either
user-defined, or if not, automatically retrieved with the
'which spm' Matlab command
"""
# initialise path
#-- folder
spm_path = None
#-- script ([folder]/spm.m)
spmscript_path = None
# check if SPM path provided by the user
if args.spm_path:
# SPM path provided
# check if the user provided a path to spm.m (which they should
# not...) instead of path to folder
abs_argsspm_path = os.path.abspath(args.spm_path)
# check if exists
if os.path.exists(abs_argsspm_path):
if os.path.isfile(abs_argsspm_path):
# path to a file was provided
# get file name
argsspm_filename = os.path.basename(args.spm_path)
# check that the file name is spm.m
if argsspm_filename != 'spm.m':
# file name is not spm.m. Crash
raise ValueError(
'{0} is not a valid path to SPM.'.format(args.spm_path))
else:
# file name is spm.m. define SPM path as the folder
# containing the provided spm.m
spmscript_path = args.spm_path
spm_path = os.path.dirname(spmscript_path)
elif os.path.isdir(abs_argsspm_path):
# a path to a folder was provided
spm_path = args.spm_path
spmscript_path = os.path.join(spm_path, 'spm.m')
else:
# neither a folder nor a file was provided
raise ValueError(
'{0} is not a valid path to SPM.'.format(args.spm_path))
# add to matlab path
mlab.MatlabCommand.set_default_paths(args.spm_path)
else:
raise ValueError(
'{0} does not exist.'.format(args.spm_path))
print('[SPM_PATH] provided by user. Using {0}'.format(spm_path))
else:
# SPM path not provided
# Check if SPM path can be found anyway
# (e.g., if file $HOME/matlab/startup.m contains the line
# addpath [spm_folder])
[
spm_found,
spm_path,
spmscript_path] = check_spm.check_system_spm_available()
if not spm_found:
# SPM not found
error_msg = 'SPM not found.\nPlease provide the path to'
error_msg = '{0} SPM with flag -spm [SPM_PATH]'.format(error_msg)
error_msg = '{0}\n{1}'.format(error_msg, cli_usage)
raise IOError(error_msg)
print('[SPM_PATH] not provided by user. Using {0}'.format(spm_path))
# sanity check: make sure the path is OK
#-- check the path to SPM is a valid folder
abs_spm_path = os.path.abspath(spm_path)
#---- check if exists
if os.path.exists(abs_spm_path):
#---- exists
#---- check if is a folder
if not os.path.isdir(abs_spm_path):
raise IOError('{0} is not a valid folder'.format(spm_path))
else:
#---- does not exist
raise IOError('{0} does not exist'.format(spm_path))
#-- check if the SPM folder contains a file spm.m
#---- get absolute path to spm.m
abs_spmscript_path = os.path.abspath(spmscript_path)
#---- check if file exists
if os.path.exists(abs_spmscript_path):
#---- spm.m is inside the folder. Check it is a file.
if not os.path.isfile(abs_spmscript_path):
raise IOError('{0} is not a file'.format(spmscript_path))
else:
#---- spm.m does not exist
error_msg = '{0} does not contain a file spm.m.'.format(spm_path)
error_msg = '{0} It is not a valid folder'.format(error_msg)
raise IOError(error_msg)
return spm_path
def prepare_folders(outdir_path):
"""Create temporary folders
Will create two subfolders inside the output directory:
1. 'debug' subfolder: contains all intermediary images generated
prior to the output recombined images
2. 'temp' subfolder: contains all temporary data generated for SPM
processing (SPM modifies the header of the images which it is
working on, so we duplicate the image to be SPM-processed
before feeding them to SPM).
Args:
outdir_path (string): absolute path to output dir, where
results will get stored
Returns:
debugdir_path (string): path to 'debug' subfolder where all
intermediary images are stored
tempdir_path (string): path to temporary subfolder where images
to be processed with SPM are duplicated and stored
"""
# Output directory: check if exists and create if not
if os.path.isdir(outdir_path):
# check if output directory is empty
if os.listdir(outdir_path):
error_msg = 'Error: please provide an empty output dir'
raise IOError(error_msg)
else:
# create the output directory
os.makedirs(outdir_path)
# define paths
debugdir_path = "{0}/debug/".format(outdir_path)
tempdir_path = "{0}/temp/".format(outdir_path)
# create subfolders if do not exist already
#-- debug
try:
os.makedirs(debugdir_path)
except OSError:
if not os.path.isdir(debugdir_path):
raise
#-- temp
try:
os.makedirs(tempdir_path)
except OSError:
if not os.path.isdir(tempdir_path):
raise
return [debugdir_path, tempdir_path]
def spm_path_filestore(debugdir_path, spm_path):
"""Store SPM location in file
Will save the path to SPM to a new file, so the user knows what
version of SPM they have been using after the recombination process
is complete.
Args:
debugdir_path (string): path to 'debug' subfolder where all
intermediary images are stored
spm_path (string): path to SPM folder. Will be either
user-defined, or if not, automatically retrieved with the
'which spm' Matlab command
Returns:
N/A
"""
# define location of the file that contains the path to spm
spm_path_store_path = os.path.join(debugdir_path, 'spm_location.txt')
# check if file already exists (should not)
spm_path_store_filename = os.path.basename(spm_path_store_path)
spm_path_store_dirname = os.path.dirname(spm_path_store_path)
if os.path.exists(spm_path_store_path):
raise IOError(
'There already is a file {0} in {1}'.format(
spm_path_store_filename,
spm_path_store_dirname))
# write spm_path to the store file
with open(spm_path_store_path, 'w') as spm_path_store_file:
spm_path_store_file.write(spm_path)
def nii_copy(im_inpath, im_outpath):
"""Copy from input to output path with output in .nii
Convenience function to copy an image from an input path to an
output path, while making sure the destination file has a .nii
extension (and not a .nii.gz one). This is to make sure we will
be feeding SPM with uncompressed images.
Args:
im_inpath (string): path to the input image. The file can be
either of type .nii or of type .nii.gz
im_outpath (string): path to the output copied image. The file
is of type .nii
Returns:
N/A
"""
# Get extension of input image filename
imfilename = os.path.basename(im_inpath)
imfilename_ext = os.path.splitext(imfilename)[1]
error_msg = 'input image must be of type either .nii or .nii.gz'
if imfilename_ext == '.nii':
# if .nii, standard copy
shutil.copyfile(im_inpath, im_outpath)
elif imfilename_ext == '.gz':
# check if the extension is .nii.gz
imfilename_start = os.path.splitext(imfilename)[0]
imfilename_start_ext = os.path.splitext(imfilename_start)[1]
if imfilename_start_ext == '.nii':
# extension is .nii.gz
with gzip.open(im_inpath, 'rb') as im_infile:
with open(im_outpath, 'wb') as im_outfile:
shutil.copyfileobj(im_infile, im_outfile)
else:
raise IOError(error_msg)
else:
raise IOError(error_msg)
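# Hedged usage sketch (added; the paths are made up for illustration):
#
# nii_copy('/data/rep1_slab1.nii.gz', '/tmp/temp/rep1_slab1.nii')
#
# The destination always gets a plain .nii name so that SPM, which is fed
# uncompressed NIfTI files, can safely modify the copy while the original
# (possibly gzipped) acquisition stays untouched.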
def safe_remove(impath, dirpath):
"""Check if image is in right folder before removing
Make sure the image to be removed is located somewhere within the
subfolder hierarchy of a defined folder (i.e., temporary folder or
folder containing the intermediary results).
This is mostly to avoid any accidental deletion anywhere
outside the working directory.
Args:
impath (string): path to the image to delete.
dirpath (string): path to folder containing the image
by one SciELO Collection.
`editor_address` references the institution who operates the
process.
`publisher_address` references the institution who is responsible
for the Journal.
"""
#Custom manager
objects = JournalCustomManager()
nocacheobjects = models.Manager()
userobjects = modelmanagers.JournalManager()
#Relation fields
creator = models.ForeignKey(User, related_name='enjoy_creator', editable=False)
sponsor = models.ManyToManyField('Sponsor', verbose_name=_('Sponsor'), related_name='journal_sponsor', null=True, blank=True)
previous_title = models.ForeignKey('Journal', verbose_name=_('Previous title'), related_name='prev_title', null=True, blank=True)
use_license = models.ForeignKey('UseLicense', verbose_name=_('Use license'))
collections = models.ManyToManyField('Collection', through='Membership')
languages = models.ManyToManyField('Language',)
national_code = models.CharField(_('National Code'), max_length=64, null=True, blank=True)
abstract_keyword_languages = models.ManyToManyField('Language', related_name="abstract_keyword_languages", )
subject_categories = models.ManyToManyField(SubjectCategory, verbose_name=_("Subject Categories"), related_name="journals", null=True)
study_areas = models.ManyToManyField(StudyArea, verbose_name=_("Study Area"), related_name="journals_migration_tmp", null=True)
editors = models.ManyToManyField(User, related_name='user_editors', null=True, blank=True)
#Fields
current_ahead_documents = models.IntegerField(_('Total of ahead of print documents for the current year'), max_length=3, default=0, blank=True, null=True)
previous_ahead_documents = models.IntegerField(_('Total of ahead of print documents for the previous year'), max_length=3, default=0, blank=True, null=True)
twitter_user = models.CharField(_('Twitter User'), max_length=128, null=True, blank=True)
title = models.CharField(_('Journal Title'), max_length=256, db_index=True)
title_iso = models.CharField(_('ISO abbreviated title'), max_length=256, db_index=True)
short_title = models.CharField(_('Short Title'), max_length=256, db_index=True, null=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
acronym = models.CharField(_('Acronym'), max_length=16, blank=False)
scielo_issn = models.CharField(_('The ISSN used to build the Journal PID.'), max_length=16,
choices=sorted(choices.SCIELO_ISSN, key=lambda SCIELO_ISSN: SCIELO_ISSN[1]))
print_issn = models.CharField(_('Print ISSN'), max_length=9, db_index=True)
eletronic_issn = models.CharField(_('Electronic ISSN'), max_length=9, db_index=True)
subject_descriptors = models.CharField(_('Subject / Descriptors'), max_length=1024)
init_year = models.CharField(_('Initial Year'), max_length=4)
init_vol = models.CharField(_('Initial Volume'), max_length=16)
init_num = models.CharField(_('Initial Number'), max_length=16)
final_year = models.CharField(_('Final Year'), max_length=4, null=True, blank=True)
final_vol = models.CharField(_('Final Volume'), max_length=16, null=False, blank=True)
final_num = models.CharField(_('Final Number'), max_length=16, null=False, blank=True)
medline_title = models.CharField(_('Medline Title'), max_length=256, null=True, blank=True)
medline_code = models.CharField(_('Medline Code'), max_length=64, null=True, blank=True)
frequency = models.CharField(_('Frequency'), max_length=16,
choices=sorted(choices.FREQUENCY, key=lambda FREQUENCY: FREQUENCY[1]))
editorial_standard = models.CharField(_('Editorial Standard'), max_length=64,
choices=sorted(choices.STANDARD, key=lambda STANDARD: STANDARD[1]))
ctrl_vocabulary = models.CharField(_('Controlled Vocabulary'), max_length=64,
choices=choices.CTRL_VOCABULARY)
pub_level = models.CharField(_('Publication Level'), max_length=64,
choices=sorted(choices.PUBLICATION_LEVEL, key=lambda PUBLICATION_LEVEL: PUBLICATION_LEVEL[1]))
secs_code = models.CharField(_('SECS Code'), max_length=64, null=False, blank=True)
copyrighter = models.CharField(_('Copyrighter'), max_length=254)
url_online_submission = models.CharField(_('URL of online submission'), max_length=128, null=True, blank=True)
url_journal = models.CharField(_('URL of the journal'), max_length=128, null=True, blank=True)
notes = models.TextField(_('Notes'), max_length=254, null=True, blank=True)
index_coverage = models.TextField(_('Index Coverage'), null=True, blank=True)
cover = models.ImageField(_('Journal Cover'), upload_to='img/journal_cover/', null=True, blank=True)
logo = models.ImageField(_('Journal Logo'), upload_to='img/journals_logos', null=True, blank=True)
is_trashed = models.BooleanField(_('Is trashed?'), default=False, db_index=True)
other_previous_title = models.CharField(_('Other Previous Title'), max_length=255, blank=True)
editor_name = models.CharField(_('Editor Names'), max_length=512)
editor_address = models.CharField(_('Editor Address'), max_length=512)
editor_address_city = models.CharField(_('Editor City'), max_length=256)
editor_address_state = models.CharField(_('Editor State/Province/Region'), max_length=128)
editor_address_zip = models.CharField(_('Editor Zip/Postal Code'), max_length=64)
editor_address_country = modelfields.CountryField(_('Editor Country'))
editor_phone1 = models.CharField(_('Editor Phone 1'), max_length=32)
editor_phone2 = models.CharField(_('Editor Phone 2'), null=True, blank=True, max_length=32)
editor_email = models.EmailField(_('Editor E-mail'))
publisher_name = models.CharField(_('Publisher Name'), max_length=256)
publisher_country = modelfields.CountryField(_('Publisher Country'))
publisher_state = models.CharField(_('Publisher State/Province/Region'), max_length=64)
publication_city = models.CharField(_('Publication City'), max_length=64)
is_indexed_scie = models.BooleanField(_('SCIE'), default=False)
is_indexed_ssci = models.BooleanField(_('SSCI'), default=False)
is_indexed_aehci = models.BooleanField(_('A&HCI'), default=False)
def __unicode__(self):
return self.title
class Meta:
ordering = ['title']
permissions = (("list_journal", "Can list Journals"),
("list_editor_journal", "Can list editor Journals"))
def issues_as_grid(self, is_available=True):
objects_all = self.issue_set.available(is_available).order_by(
'-publication_year', '-volume')
grid = OrderedDict()
for issue in objects_all:
year_node = grid.setdefault(issue.publication_year, OrderedDict())
volume_node = year_node.setdefault(issue.volume, [])
volume_node.append(issue)
for year, volume in grid.items():
for vol, issues in volume.items():
issues.sort(key=lambda x: x.order)
return grid
def has_issues(self, issues):
"""
Returns ``True`` if all the given issues are bound to the journal.
``issues`` is a list of Issue pk.
"""
issues_to_test = set(int(issue) for issue in issues)
bound_issues = set(issue.pk for issue in self.issue_set.all())
return issues_to_test.issubset(bound_issues)
def reorder_issues(self, new_order, publication_year, volume=None):
"""
Make persistent the ordering received as a list of ``pk``,
to all the issues in a given ``publication_year`` and ``volume``.
The length of ``new_order`` must match the number of
issues filtered by ``publication_year`` and ``volume``.
"""
filters = {'publication_year': publication_year}
if volume:
filters['volume'] = volume
issues = self.issue_set.filter(**filters)
issues_count = issues.count()
new_order_count = len(new_order)
if new_order_count != issues_count:
raise ValueError('new_order length does not match. %s:%s' % (new_order_count, issues_count))
with transaction.commit_on_success():
for i, pk in enumerate(new_order):
order = i + 1
issue = issues.get(pk=pk)
issue.order = order
issue.save()
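# Hedged usage sketch (added; object names and pk values are illustrative):
#
# journal.reorder_issues([12, 10, 11], publication_year=2013, volume='7')
#
# persists order=1..3 onto the three issues of volume 7/2013 following the given
# pk sequence, and raises ValueError when the pk list length does not match the
# number of issues in that subset.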
def is_editor(self, user):
"""
Returns a boolean value depending if the given user is an editor
of the current journal.
"""
try:
self.editors.get(id=user.id)
except ObjectDoesNotExist:
return False
return True
@property
def scielo_pid(self):
"""
Returns the ISSN used as PID on SciELO public catalogs.
"""
attr = u'print_issn' if self.scielo_issn == u'print' else u'eletronic_issn'
return getattr(self, attr)
def join(self, collection, responsible):
"""Make this journal part of the collection.
"""
Membership.objects.create(journal=self,
collection=collection,
created_by=responsible,
status='inprogress')
def membership_info(self, collection, attribute=None):
"""Retrieve info about the relation of this journal with a
given collection.
"""
rel = self.membership_set.get(collection=collection)
if attribute:
return getattr(rel, attribute)
else:
return rel
def change_status(self, collection, new_status, reason, responsible):
rel = self.membership_info(collection)
rel.status = new_status
rel.reason = reason
rel.save()
class Membership(models.Model):
"""
Represents the many-to-many relation
between Journal and Collection.
"""
journal = models.ForeignKey('Journal')
collection = models.ForeignKey('Collection')
status = models.CharField(max_length=16, default="inprogress",
choices=choices.JOURNAL_PUBLICATION_STATUS)
since = models.DateTimeField(auto_now=True)
reason = models.TextField(_('Why are you changing the publication status?'),
blank=True, default="")
created_by = models.ForeignKey(User, editable=False)
def save(self, *args, **kwargs):
"""
Always save a copy at JournalTimeline
"""
super(Membership, self).save(*args, **kwargs)
JournalTimeline.objects.create(journal=self.journal,
collection=self.collection,
status=self.status,
reason=self.reason,
created_by=self.created_by,
since=self.since)
class Meta():
unique_together = ("journal", "collection")
class JournalTimeline(models.Model):
"""
Represents the status history of a journal.
"""
journal = models.ForeignKey('Journal', related_name='statuses')
collection = models.ForeignKey('Collection')
status = models.CharField(max_length=16,
choices=choices.JOURNAL_PUBLICATION_STATUS)
since = models.DateTimeField()
reason = models.TextField(default="")
created_by = models.ForeignKey(User)
class JournalTitle(caching.base.CachingMixin, models.Model):
objects = caching.base.CachingManager()
nocacheobjects = models.Manager()
journal = models.ForeignKey(Journal, related_name='other_titles')
title = models.CharField(_('Title'), null=False, max_length=128)
category = models.CharField(_('Title Category'), null=False, max_length=128, choices=sorted(choices.TITLE_CATEGORY, key=lambda TITLE_CATEGORY: TITLE_CATEGORY[1]))
class JournalMission(caching.base.CachingMixin, models.Model):
objects = caching.base.CachingManager()
nocacheobjects = models.Manager()
journal = models.ForeignKey(Journal, related_name='missions')
description = models.TextField(_('Mission'))
language = models.ForeignKey('Language', blank=False, null=True)
class UseLicense(caching.base.CachingMixin, models.Model):
objects = caching.base.CachingManager()
nocacheobjects = models.Manager()
license_code = models.CharField(_('License Code'), unique=True, null=False, blank=False, max_length=64)
reference_url = models.URLField(_('License Reference URL'), null=True, blank=True)
disclaimer = models.TextField(_('Disclaimer'), null=True, blank=True, max_length=512)
is_default = models.BooleanField(_('Is Default?'), default=False)
def __unicode__(self):
return self.license_code
class Meta:
ordering = ['license_code']
def save(self, *args, **kwargs):
"""
Only one UseLicense must be the default (is_default==True).
If another license is already the default, it will be unset (is_default==False).
If no default is set yet, the instance being saved will become the default.
If the only default license is being unset, it will be forced back to default
anyway, so that there is always one license set as default.
"""
qs = UseLicense.objects.filter(is_default=True)
if (qs.count() == 0 ) or (self in qs):
# no other was default, or ``self`` is the current default one,
# so ``self`` will be set as default
self.is_default = True
if self.is_default:
if self.pk:
qs = qs.exclude(pk=self.pk)
if qs.count() != 0:
qs.update(is_default=False)
super(UseLicense, self).save(*args, **kwargs)
class TranslatedData(caching.base.CachingMixin, models.Model):
objects = caching.base.CachingManager()
nocacheobjects = models.Manager()
translation = models.CharField(_('Translation'), null=True, blank=True, max_length=512)
language = models.CharField(_('Language'), choices=sorted(choices.LANGUAGE, key=lambda LANGUAGE: LANGUAGE[1]), null=False, blank=False, max_length=32)
model = models.CharField(_('Model'), null=False, blank=False, max_length=32)
field = models.CharField(_('Field'), null=False, blank=False, max_length=32)
def __unicode__(self):
return self.translation if self.translation is not None else 'Missing trans: {0}.{1}'.format(self.model, self.field)
class SectionTitle(caching.base.CachingMixin, models.Model):
objects = caching.base.CachingManager()
nocacheobjects = models.Manager()
section = models.ForeignKey('Section', related_name='titles')
title = models.CharField(_('Title'), max_length=256, null=False)
language = models.ForeignKey('Language')
class Meta:
ordering = ['title']
class Section(caching.base.CachingMixin, models.Model):
"""
Represents a multilingual section of one/many Issues of
a given Journal.
``legacy_code`` contains the section code used by the old
title manager. We've decided to store this value purely for
historical reasons, and we don't know if it will last forever.
"""
#Custom manager
objects = SectionCustomManager()
nocacheobjects = models.Manager()
userobjects = modelmanagers.SectionManager()
journal = models.ForeignKey(Journal)
code = models.CharField(unique=True, max_length=21, blank=True)
legacy_code = models.CharField(null=True, blank=True, max_length=16)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
is_trashed = models.BooleanField(_('Is trashed?'), default=False, db_index=True)
def __unicode__(self):
return ' / '.join([sec_title.title for sec_title in self.titles.all().order_by('language')])
@property
def actual_code(self):
if not self.pk or not self.code:
raise AttributeError('section must be saved in order to have a code')
return self.code
def is_used(self):
try:
return True if self.issue_set.all().count() else False
except ValueError: # raised when the object is not yet saved
return False
def add_title(self, title, language):
"""
Adds a section title in the given language.
A Language instance must be passed as the language argument.
"""
SectionTitle.objects.create(section=self,
title=title, language=language)
def _suggest_code(self,
is computed.
Returns
-------
iqr: float
Interquartile range of 1-dimensional sequence.
"""
#ny.percentile(sig, 75) - ny.percentile(sig, 25)
return np.percentile(sig, 75) - np.percentile(sig, 25)
# Kurtosis
def calc_kurtosis(sig):
"""Compute kurtosis along the specified axes.
Parameters
----------
input: ndarray
input from which kurtosis is computed.
Returns
-------
k: float
kurtosis result.
"""
return kurtosis(sig)
# Skewness
def calc_skewness(sig):
"""Compute skewness along the specified axes.
Parameters
----------
input: ndarray
input from which skewness is computed.
Returns
-------
s: float
skewness result.
"""
return skew(sig)
# Mean
def calc_mean(sig):
"""Compute mean value along the specified axes.
Parameters
----------
input: ndarray
input from which mean is computed.
Returns
-------
m: float
mean result.
"""
# m = mean(sig)
return np.mean(sig)
# Standard Deviation
def calc_std(sig):
"""Compute standard deviation (std) along the specified axes.
Parameters
----------
input: ndarray
input from which std is computed.
Returns
-------
std_value: float
std result.
"""
return np.std(sig)
# Interquartile Range
def calc_iqr(sig):
"""Compute interquartile range along the specified axes.
Parameters
----------
input: ndarray
input from which interquartile range is computed.
Returns
-------
iqr: float
interquartile range result.
"""
# iqr = subtract(*percentile(sig, [75, 25]))
return np.percentile(sig, 75) - np.percentile(sig, 25)
# Mean Absolute Deviation
def calc_meanad(sig):
"""Compute mean absolute deviation along the specified axes.
Parameters
----------
input: ndarray
input from which mean absolute deviation is computed.
Returns
-------
mad: float
mean absolute deviation result.
"""
m = np.mean(sig)
diff = [abs(x-m) for x in sig]
return np.mean(diff)
# Median Absolute Deviation
def calc_medad(sig):
"""Compute mean absolute deviation along the specified axes.
Parameters
----------
input: ndarray
input from which mean absolute deviation is computed.
Returns
-------
mad: int
mean absolute deviation result.
"""
m = np.median(sig)
diff = [abs(x-m) for x in sig]
return np.median(diff)
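# Worked example (added for illustration): for sig = [1, 2, 3, 4, 100]
# calc_meanad(sig) == mean(|x - 22|)  == 31.2   (pulled up by the outlier)
# calc_medad(sig)  == median(|x - 3|) == 1.0    (robust to the outlier)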
# Root Mean Square
def rms(sig):
"""Compute root mean square along the specified axes.
Parameters
----------
input: ndarray
input from which root mean square is computed.
Returns
-------
rms: float
square root of the arithmetic mean (average) of the squares of the original values.
"""
return np.sqrt(np.sum(np.array(sig)**2)/len(sig))
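# Example (added): rms([3, -3, 3, -3]) == 3.0, since every sample has magnitude 3
# and the square root of the mean of the squares is sqrt(36 / 4).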
# Histogram for json format
def hist(sig, nbins, r):
"""Compute histogram along the specified axes.
Parameters
----------
sig: ndarray
input from which the histogram is computed.
nbins: sequence of int
the number of equal-width bins in the given range (only nbins[0] is used).
r: sequence of float
the lower (-r[0]) and upper (r[0]) bounds of the bins.
Returns
-------
histsig: tuple
the density-normalised values of the histogram; the bin edges are discarded.
"""
histsig, bin_edges = np.histogram(sig, bins=nbins[0], range=[-r[0], r[0]], density=True) #TODO:subsampling parameter
# bin_edges = bin_edges[:-1]
# bin_edges += (bin_edges[1]-bin_edges[0])/2.
return tuple(histsig)
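# Illustrative call (added; the input values are assumptions): a zero-mean signal
# histogrammed into 10 density-normalised bins over [-3, 3]. Note that nbins and r
# are passed as one-element sequences, matching the nbins[0] / r[0] indexing above.
#
# import numpy as np
# densities = hist(np.random.randn(1000), nbins=[10], r=[3])
# # densities is a 10-element tuple; the bin edges are discarded.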
def minpeaks(sig):
"""Compute number of minimum peaks along the specified axes.
Parameters
----------
sig: ndarray
Returns
-------
float
number of local minima (sign changes of the first difference from negative to positive)
"""
diff_sig = np.diff(sig)
return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd]<0 and diff_sig[nd + 1]>0)])
def maxpeaks(sig):
"""Compute number of peaks along the specified axes.
Parameters
----------
sig: ndarray
input from histogram is computed.
type: string
can be 'all', 'max', and 'min', and expresses which peaks are going to be accounted
Returns
-------
num_p: float
total number of peaks
"""
diff_sig = np.diff(sig)
return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])
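# Small worked example (added): sig = [0, 1, 0, 1, 0] gives np.diff(sig) == [1, -1, 1, -1],
# so maxpeaks(sig) == 2 (local maxima at indices 1 and 3) and
# minpeaks(sig) == 1 (local minimum at index 2).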
# Spectral Centroid
def spectral_centroid(sign, fs):  # center of mass of the magnitude spectrum
"""Barycenter of the spectrum.
Parameters
----------
sign: ndarray
signal from which the spectral centroid is computed.
fs: int
sampling frequency of the signal
Returns
-------
centroid: float
spectral centroid (barycenter of the magnitude spectrum)
"""
f, ff = plotfft(sign, fs)
if not np.sum(ff):
return 0
else:
return np.dot(f,ff/np.sum(ff))
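# Hedged usage sketch (added): plotfft is assumed to come from the same feature
# library and to return (frequency axis, magnitude spectrum). For a pure tone the
# centroid should land near the tone frequency.
#
# import numpy as np
# fs = 100.0
# t = np.arange(0, 1, 1 / fs)
# tone = np.sin(2 * np.pi * 5 * t)
# # spectral_centroid(tone, fs)  ->  approximately 5.0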
# Spectral Spread
def spectral_spread(sign, fs):
"""Measures the spread of the spectrum around its mean value.
Parameters
----------
sign: ndarray
signal from which the spectral spread is computed.
fs: int
sampling frequency of the signal
Returns
-------
spread: float
spread
"""
f, ff = plotfft(sign, fs)
spect_centr = spectral_centroid(sign, fs)
if not np.sum(ff):
return 0
else:
return np.dot(((f-spect_centr)**2), (ff / np.sum(ff)))
# Spectral Skewness
def spectral_skewness(sign, fs):
"""Measures the asymmetry of a distribution around its mean value. Computed from the 3rd order moment.
Parameters
----------
sign: ndarray
signal from which the spectral skewness is computed.
fs: int
sampling frequency of the signal
Returns
-------
skewness: float
spectral skewness
"""
f, ff = plotfft(sign, fs)
spect_centr = spectral_centroid(sign, fs)
if not spectral_spread(sign, fs):
return 0
else:
skew = ((f - spect_centr) ** 3) * (ff / np.sum(ff))
return np.sum(skew) / (spectral_spread(sign, fs) ** (3 / 2))
# Spectral Kurtosis
def spectral_kurtosis(sign, fs):
"""Measures the flatness of a distribution around its mean value. Computed from the 4th order moment.
Parameters
----------
sign: ndarray
signal from which the spectral kurtosis is computed.
fs: int
sampling frequency of the signal
Returns
-------
kurtosis: float
kurtosis
"""
f, ff = plotfft(sign, fs)
if not spectral_spread(sign, fs):
return 0
else:
spect_kurt = ((f - spectral_centroid(sign, fs)) ** 4) * (ff / np.sum(ff))
return np.sum(spect_kurt) / (spectral_spread(sign, fs)**2)
# Spectral Slope
def spectral_slope(sign, fs):
"""Computes the constants m and b of the function aFFT = mf + b, obtained by linear regression of the
spectral amplitude.
Parameters
----------
sign: ndarray
signal from which spectral slope is computed.
fs: int
sampling frequency of the signal
Returns
-------
m: float
slope
b: float
y-intercept
"""
f, ff = plotfft(sign, fs)
if not(list(f)):
return 0
else:
if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):
return 0
else:
return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)
# Spectral Decrease
def spectral_decrease(sign, fs):
"""Represents the amount of decraesing of the spectra amplitude.
Parameters
----------
sign: ndarray
signal from which spectral slope is computed.
fs: int
sampling frequency of the signal
Returns
-------
spectrak decrease
"""
f, ff = plotfft(sign, fs)
k = len(ff)
soma_num = 0
for a in range(2, k):
soma_num = soma_num + ((ff[a]-ff[1])/(a-1))
ff2 = ff[2:]
if not np.sum(ff2):
return 0
else:
soma_den = 1 / np.sum(ff2)
return soma_den * soma_num
def spectral_roll_on(sign, fs):
"""Compute the spectral roll-on of the signal, i.e., the frequency where 5% of the signal energy is contained
below of this value.
Parameters
----------
sign: ndarray
signal from which spectral slope is computed.
fs: int
sampling frequency of the signal
Returns
-------
roll_off: float
spectral roll-on
"""
output = 0
f, ff = plotfft(sign, fs)
cum_ff = np.cumsum(ff)
value = 0.05*(sum(ff))
for i in range(len(ff)):
if cum_ff[i] >= value:
output = f[i]
break
return output
def spectral_roll_off(sign, fs):
"""Compute the spectral roll-off of the signal, i.e., the frequency where 95% of the signal energy is contained
below of this value.
Parameters
----------
sign: ndarray
signal from which spectral slope is computed.
fs: int
sampling frequency of the signal
Returns
-------
roll_off: float
spectral roll-off
"""
output = 0
f, ff = plotfft(sign, fs)
cum_ff = np.cumsum(ff)
value = 0.95*(sum(ff))
for i in range(len(ff)):
if cum_ff[i] >= value:
output = f[i]
break
return output
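# Added note: roll-on and roll-off bracket the spectrum. spectral_roll_on returns the
# frequency below which 5% of the cumulative spectral magnitude lies, while
# spectral_roll_off returns the frequency below which 95% lies, so for any signal x
# spectral_roll_on(x, fs) <= spectral_roll_off(x, fs).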
def curve_distance(sign, fs):
"""Euclidean distance of the signal's cumulative sum of the FFT elements to the respective linear regression.
Parameters
----------
sign: ndarray
signal from which spectral slope is computed.
fs: int
sampling frequency of the signal
Returns
-------
curve distance: float
curve distance
"""
f, ff = plotfft(sign, fs)
cum_ff = np.cumsum(ff)
points_y = np.linspace(0, cum_ff[-1], len(cum_ff))
return np.sum(points_y-cum_ff)
def spect_variation(sign, fs):
"""Amount of variation of the spectrum along time. Computed from the normalized cross-correlation between two consecutive amplitude spectra.
Parameters
----------
sign: ndarray
signal from which the spectral variation is computed.
fs: int
sampling frequency of the signal
Returns
-------
variation: float
"""
f, ff = plotfft(sign, fs)
energy, freq = signal_energy(ff, f)
sum1 = 0
sum2 = 0
sum3 = 0
for a in range(len(energy)-1):
sum1 = sum1+(energy[a-1]*energy[a])
sum2 = sum2+(energy[a-1]**2)
sum3 = sum3+(energy[a]**2)
if not sum2 or not sum3:
variation = 1
else:
variation = 1-((sum1)/((sum2**0.5)*(sum3**0.5)))
return variation
# Variance
def variance(sign, FS):
""" Measures how far the numbers are spread out.
Parameters
----------
sig: ndarray
input signal from which the variance is computed.
fs: int
sampling frequency of the signal
Returns
-------
variance: float
time-weighted signal variance
"""
time = compute_time(sign, FS)
soma_den = 0
soma_num = 0
for z in range(0, len(sign)):
soma_num = soma_num + (time[z]*((sign[z]-np.mean(sign))**2))
soma_den = soma_den +
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
('plsp_id', YLeaf(YType.int32, 'plsp-id')),
('tunnel_name', YLeaf(YType.str, 'tunnel-name')),
('pcc_address', YLeaf(YType.str, 'pcc-address')),
('tunnel_name_xr', YLeaf(YType.str, 'tunnel-name-xr')),
])
self.peer_address = None
self.plsp_id = None
self.tunnel_name = None
self.pcc_address = None
self.tunnel_name_xr = None
self.brief_lsp_information = YList(self)
self._segment_path = lambda: "tunnel-info" + "[peer-address='" + str(self.peer_address) + "']" + "[plsp-id='" + str(self.plsp_id) + "']" + "[tunnel-name='" + str(self.tunnel_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/tunnel-infos/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelInfos.TunnelInfo, ['peer_address', 'plsp_id', 'tunnel_name', 'pcc_address', 'tunnel_name_xr'], name, value)
class BriefLspInformation(Entity):
"""
Brief LSP information
.. attribute:: source_address
Source address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_address
Destination address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: tunnel_id
Tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: lspid
LSP ID
**type**\: int
**range:** 0..4294967295
.. attribute:: binding_sid
Binding SID
**type**\: int
**range:** 0..4294967295
.. attribute:: lsp_setup_type
LSP Setup Type
**type**\: :py:class:`LspSetup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.LspSetup>`
.. attribute:: operational_state
Operational state
**type**\: :py:class:`PcepLspState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcepLspState>`
.. attribute:: administrative_state
Admin state
**type**\: :py:class:`LspState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.LspState>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelInfos.TunnelInfo.BriefLspInformation, self).__init__()
self.yang_name = "brief-lsp-information"
self.yang_parent_name = "tunnel-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('source_address', YLeaf(YType.str, 'source-address')),
('destination_address', YLeaf(YType.str, 'destination-address')),
('tunnel_id', YLeaf(YType.uint32, 'tunnel-id')),
('lspid', YLeaf(YType.uint32, 'lspid')),
('binding_sid', YLeaf(YType.uint32, 'binding-sid')),
('lsp_setup_type', YLeaf(YType.enumeration, 'lsp-setup-type')),
('operational_state', YLeaf(YType.enumeration, 'operational-state')),
('administrative_state', YLeaf(YType.enumeration, 'administrative-state')),
])
self.source_address = None
self.destination_address = None
self.tunnel_id = None
self.lspid = None
self.binding_sid = None
self.lsp_setup_type = None
self.operational_state = None
self.administrative_state = None
self._segment_path = lambda: "brief-lsp-information"
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelInfos.TunnelInfo.BriefLspInformation, ['source_address', 'destination_address', 'tunnel_id', 'lspid', 'binding_sid', 'lsp_setup_type', 'operational_state', 'administrative_state'], name, value)
class LspSummary(Entity):
"""
LSP summary database in XTC
.. attribute:: all_ls_ps
Summary for all peers
**type**\: :py:class:`AllLsPs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.LspSummary.AllLsPs>`
.. attribute:: peer_ls_ps_info
Number of LSPs for specific peer
**type**\: list of :py:class:`PeerLsPsInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.LspSummary.PeerLsPsInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.LspSummary, self).__init__()
self.yang_name = "lsp-summary"
self.yang_parent_name = "pce-lsp-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("all-ls-ps", ("all_ls_ps", PceLspData.LspSummary.AllLsPs))])
self._child_list_classes = OrderedDict([("peer-ls-ps-info", ("peer_ls_ps_info", PceLspData.LspSummary.PeerLsPsInfo))])
self._leafs = OrderedDict()
self.all_ls_ps = PceLspData.LspSummary.AllLsPs()
self.all_ls_ps.parent = self
self._children_name_map["all_ls_ps"] = "all-ls-ps"
self._children_yang_names.add("all-ls-ps")
self.peer_ls_ps_info = YList(self)
self._segment_path = lambda: "lsp-summary"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.LspSummary, [], name, value)
class AllLsPs(Entity):
"""
Summary for all peers
.. attribute:: all_ls_ps
Number of all LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: up_ls_ps
Number of operational LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: admin_up_ls_ps
Number of administratively up LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: sr_ls_ps
Number of LSPs with Segment routing setup type
**type**\: int
**range:** 0..4294967295
.. attribute:: rsvp_ls_ps
Number of LSPs with RSVP setup type
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.LspSummary.AllLsPs, self).__init__()
self.yang_name = "all-ls-ps"
self.yang_parent_name = "lsp-summary"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('all_ls_ps', YLeaf(YType.uint32, 'all-ls-ps')),
('up_ls_ps', YLeaf(YType.uint32, 'up-ls-ps')),
('admin_up_ls_ps', YLeaf(YType.uint32, 'admin-up-ls-ps')),
('sr_ls_ps', YLeaf(YType.uint32, 'sr-ls-ps')),
('rsvp_ls_ps', YLeaf(YType.uint32, 'rsvp-ls-ps')),
])
self.all_ls_ps = None
self.up_ls_ps = None
self.admin_up_ls_ps = None
self.sr_ls_ps = None
self.rsvp_ls_ps = None
self._segment_path = lambda: "all-ls-ps"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/lsp-summary/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.LspSummary.AllLsPs, ['all_ls_ps', 'up_ls_ps', 'admin_up_ls_ps', 'sr_ls_ps', 'rsvp_ls_ps'], name, value)
class PeerLsPsInfo(Entity):
"""
Number of LSPs for specific peer
.. attribute:: lsp_summary
Number of LSPs for specific peer
**type**\: :py:class:`LspSummary_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.LspSummary.PeerLsPsInfo.LspSummary_>`
.. attribute:: peer_address
Peer IPv4 address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.LspSummary.PeerLsPsInfo, self).__init__()
self.yang_name = "peer-ls-ps-info"
self.yang_parent_name = "lsp-summary"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("lsp-summary", ("lsp_summary", PceLspData.LspSummary.PeerLsPsInfo.LspSummary_))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
])
self.peer_address = None
self.lsp_summary = PceLspData.LspSummary.PeerLsPsInfo.LspSummary_()
self.lsp_summary.parent = self
self._children_name_map["lsp_summary"] = "lsp-summary"
self._children_yang_names.add("lsp-summary")
self._segment_path = lambda: "peer-ls-ps-info"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/lsp-summary/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.LspSummary.PeerLsPsInfo, ['peer_address'], name, value)
class LspSummary_(Entity):
"""
Number of LSPs for specific peer
.. attribute:: all_ls_ps
Number of all LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: up_ls_ps
Number of operational LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: admin_up_ls_ps
Number of administratively up LSPs
**type**\: int
**range:** 0..4294967295
.. attribute:: sr_ls_ps
Number of LSPs with Segment routing setup type
**type**\: int
**range:** 0..4294967295
.. attribute:: rsvp_ls_ps
Number of LSPs with RSVP setup type
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.LspSummary.PeerLsPsInfo.LspSummary_, self).__init__()
self.yang_name = "lsp-summary"
self.yang_parent_name = "peer-ls-ps-info"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('all_ls_ps', YLeaf(YType.uint32, 'all-ls-ps')),
('up_ls_ps', YLeaf(YType.uint32, 'up-ls-ps')),
('admin_up_ls_ps', YLeaf(YType.uint32, 'admin-up-ls-ps')),
('sr_ls_ps', YLeaf(YType.uint32, 'sr-ls-ps')),
('rsvp_ls_ps', YLeaf(YType.uint32, 'rsvp-ls-ps')),
])
self.all_ls_ps = None
self.up_ls_ps = None
self.admin_up_ls_ps = None
self.sr_ls_ps = None
self.rsvp_ls_ps = None
self._segment_path = lambda: "lsp-summary"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/lsp-summary/peer-ls-ps-info/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.LspSummary.PeerLsPsInfo.LspSummary_, ['all_ls_ps', 'up_ls_ps', 'admin_up_ls_ps', 'sr_ls_ps', 'rsvp_ls_ps'], name, value)
class TunnelDetailInfos(Entity):
"""
Detailed tunnel database in XTC
.. attribute:: tunnel_detail_info
Detailed tunnel information
**type**\: list of :py:class:`TunnelDetailInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelDetailInfos.TunnelDetailInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelDetailInfos, self).__init__()
self.yang_name = "tunnel-detail-infos"
self.yang_parent_name = "pce-lsp-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("tunnel-detail-info", ("tunnel_detail_info", PceLspData.TunnelDetailInfos.TunnelDetailInfo))])
self._leafs = OrderedDict()
self.tunnel_detail_info = YList(self)
self._segment_path = lambda: "tunnel-detail-infos"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelDetailInfos, [], name, value)
class TunnelDetailInfo(Entity):
"""
Detailed tunnel information
.. attribute:: peer_address (key)
Peer Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: plsp_id (key)
PCEP LSP ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: tunnel_name (key)
Tunnel name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: private_lsp_information
Private LSP information
**type**\: :py:class:`PrivateLspInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation>`
.. attribute:: pcc_address
PCC address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: tunnel_name_xr
Tunnel Name
**type**\: str
.. attribute:: detail_lsp_information
Detail LSP information
**type**\: list of :py:class:`DetailLspInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelDetailInfos.TunnelDetailInfo.DetailLspInformation>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelDetailInfos.TunnelDetailInfo, self).__init__()
self.yang_name = "tunnel-detail-info"
self.yang_parent_name = "tunnel-detail-infos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['peer_address','plsp_id','tunnel_name']
self._child_container_classes = OrderedDict([("private-lsp-information", ("private_lsp_information", PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation))])
self._child_list_classes = OrderedDict([("detail-lsp-information", ("detail_lsp_information", PceLspData.TunnelDetailInfos.TunnelDetailInfo.DetailLspInformation))])
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
('plsp_id', YLeaf(YType.int32, 'plsp-id')),
('tunnel_name', YLeaf(YType.str, 'tunnel-name')),
('pcc_address', YLeaf(YType.str, 'pcc-address')),
('tunnel_name_xr', YLeaf(YType.str, 'tunnel-name-xr')),
])
self.peer_address = None
self.plsp_id = None
self.tunnel_name = None
self.pcc_address = None
self.tunnel_name_xr = None
self.private_lsp_information = PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation()
self.private_lsp_information.parent = self
self._children_name_map["private_lsp_information"] = "private-lsp-information"
self._children_yang_names.add("private-lsp-information")
self.detail_lsp_information = YList(self)
self._segment_path = lambda: "tunnel-detail-info" + "[peer-address='" + str(self.peer_address) + "']" + "[plsp-id='" + str(self.plsp_id) + "']" + "[tunnel-name='" + str(self.tunnel_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/tunnel-detail-infos/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelDetailInfos.TunnelDetailInfo, ['peer_address', 'plsp_id', 'tunnel_name', 'pcc_address', 'tunnel_name_xr'], name, value)
class PrivateLspInformation(Entity):
"""
Private LSP information
.. attribute:: event_buffer
LSP Event buffer
**type**\: list of :py:class:`EventBuffer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation.EventBuffer>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation, self).__init__()
self.yang_name = "private-lsp-information"
self.yang_parent_name = "tunnel-detail-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("event-buffer", ("event_buffer", PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation.EventBuffer))])
self._leafs = OrderedDict()
self.event_buffer = YList(self)
self._segment_path = lambda: "private-lsp-information"
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation, [], name, value)
class EventBuffer(Entity):
"""
LSP Event buffer
.. attribute:: event_message
Event message
**type**\: str
.. attribute:: time_stamp
Event time, relative to Jan 1, 1970
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelDetailInfos.TunnelDetailInfo.PrivateLspInformation.EventBuffer, self).__init__()
| |
from collections import defaultdict
from typing import Iterable, List
import torch
from ppq.core import (COMPELING_OP_TYPES, LINEAR_ACTIVATIONS,
ORT_OOS_FUSE_START_OPS, PPLCUDA_ACTIVATIONS,
QuantizationProperty, QuantizationStates, RoundingPolicy,
TargetPlatform, TensorQuantizationConfig,
empty_ppq_cache)
from ppq.executor import BaseGraphExecutor
from ppq.IR import GraphCommandProcesser, QuantableOperation, Variable
from ppq.IR.base.graph import Operation
from ppq.IR.quantize import QuantableVariable
from ppq.IR.search import SearchableGraph, TraversalCommand
from ppq.quantization.observer.range import minmax_to_scale_offset
from .base import QuantizationOptimizationPass
class QuantizeReducePass(QuantizationOptimizationPass):
"""
QuantizeReducePass simplifies quantization fixation: every quantable operation normally carries
quantization configs on both its inputs and outputs, and at runtime roughly half of them can be
blocked to speed up inference. This pass finds the configs that can be blocked. A usage sketch
follows the class definition.
For two neighbouring operations (op_1 -> op_2) there are four situations:
1. neither op_1 nor op_2 is quantized: the data flow needs no extra handling
2. op_1 is quantized while op_2 is not: op_1 must quantize its output
3. op_1 is not quantized while op_2 is: the data flow is quantized with op_2's parameters
4. both are quantized:
    4.1. op_1 has a higher bit width than op_2: quantize with op_2's parameters
    4.2. op_1 has a lower bit width than op_2: quantize with op_1's parameters
    4.3. equal bit widths: quantize with op_1's parameters
For a branching structure such as
                 ------> op_2
    op_1 ----+
                 ------> op_3
op_1 must quantize the data flow if it carries quantization info, while op_2 and op_3 each decide
separately whether their input needs to be quantized again.
Conclusion:
when the bit width of the downstream op is larger than or equal to that of the upstream op,
quantization uses the upstream op's parameters and happens upstream; when it is smaller,
quantization uses the downstream op's parameters and happens downstream (and the upstream
quantization may still not be omitted).
"""
def __init__(self) -> None:
super().__init__(name='PPQ Quantize Point Reduce Pass')
def optimize(
self,
processer: GraphCommandProcesser,
dataloader: Iterable,
executor: BaseGraphExecutor,
**kwargs
) -> None:
graph = processer.graph
for _, varaible in graph.variables.items():
assert isinstance(varaible, Variable)
source_op = varaible.source_op
if source_op is None: continue # input variables in network, they do not have a source
if not isinstance(source_op, QuantableOperation): continue
source_config = source_op.config.output_quantization_config[source_op.outputs.index(varaible)]
for downstream_op, dest_idx in zip(varaible.dest_ops, varaible.dest_idx):
if downstream_op is None: continue # output variables in network, they do not have a destination
if not isinstance(downstream_op, QuantableOperation): continue
input_config = downstream_op.config.input_quantization_config[dest_idx]
if source_op.platform == downstream_op.platform:
if input_config.state == QuantizationStates.INITIAL:
input_config.dominated_by = source_config
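# Usage sketch (illustrative; the surrounding pipeline objects are assumed to exist, and in
# practice this pass is scheduled by ppq's optimization pipeline rather than called by hand):
#
#   reduce_pass = QuantizeReducePass()
#   reduce_pass.optimize(processer=graph_command_processer,
#                        dataloader=calibration_loader,
#                        executor=graph_executor)
#
# Afterwards, on every same-platform edge the downstream input config is dominated by the
# upstream output config, so the edge is quantized once with a single shared set of parameters.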
class QuantizeRefinePass(QuantizationOptimizationPass):
"""
Fixes quantization mistakes on operators whose inputs are only partially quantizable. For some
ONNX operators certain inputs must be quantized while others must not: a Reshape, for example,
takes `data` and `shape` inputs, and `shape` must never be quantized, so QuantizeRefinePass
corrects the Reshape quantization config to avoid quantizing the `shape` input by mistake.
The pass currently covers 'Reshape', 'Slice', 'Gather', 'Clip', 'Pad', 'Resize' and 'Split'
and corrects all known quantization-behaviour errors for them.
Quantizers for every target platform should apply QuantizeRefinePass to fix these errors;
this pass should be applied regardless of the backend platform.
"""
def __init__(self) -> None:
super().__init__(name='PPQ Quantization Config Refine Pass')
@ empty_ppq_cache
def optimize(
self,
processer: GraphCommandProcesser,
dataloader: Iterable,
executor: BaseGraphExecutor,
**kwargs
) -> None:
graph = processer.graph
for _, operation in graph.operations.items():
if not isinstance(operation, QuantableOperation): continue
if operation.type in {'Reshape', 'Slice', 'Gather', 'Clip', 'Pad', 'Resize', 'Split'}:
if operation.type == 'Reshape':
# Inputs:
# data (differentiable) : T
# An input tensor.
# shape (non-differentiable) : tensor(int64)
# Specified shape for output.
# see also https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape
assert len(operation.config.input_quantization_config) == 2, f'Reshape Operation {operation.name} should have exactly 2 inputs, '\
f'while {len(operation.config.input_quantization_config)} was given, is graph definition different from onnx opset 11?'
operation.config.input_quantization_config[-1].state = QuantizationStates.SOI
continue
if operation.type == 'Slice':
# Inputs (3 - 5)
# data (differentiable) : T
# Tensor of data to extract slices from.
# starts (non-differentiable) : Tind
# 1-D tensor of starting indices of corresponding axis in `axes`
# ends (non-differentiable) : Tind
# 1-D tensor of ending indices (exclusive) of corresponding axis in `axes`
# axes (optional, non-differentiable) : Tind
# 1-D tensor of axes that `starts` and `ends` apply to. Negative value means
# counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).
# steps (optional, non-differentiable) : Tind
# 1-D tensor of slice step of corresponding axis in `axes`.
# Negative value means slicing backward. 'steps' cannot be 0. Defaults to 1.
# see also https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Slice-11
assert len(operation.config.input_quantization_config) in {3, 4, 5}, f'Slice Operation {operation.name} should have 3 - 5 inputs, '\
f'while {len(operation.config.input_quantization_config)} was given, is graph definition different from onnx opset 11?'
for config in operation.config.input_quantization_config[1: ]:
config.state = QuantizationStates.SOI
continue
if operation.type == 'Gather':
# Inputs
# data (differentiable) : T
# Tensor of rank r >= 1.
# indices (non-differentiable) : Tind
# Tensor of int32/int64 indices, of any rank q.
# All index values are expected to be within bounds [-s, s-1] along axis of size s.
# It is an error if any of the index values are out of bounds.
# see also https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Gather-11
assert len(operation.config.input_quantization_config) == 2, f'Gather Operation {operation.name} should have 2 inputs, '\
f'while {len(operation.config.input_quantization_config)} was given, is graph definition different from onnx opset 11?'
operation.config.input_quantization_config[-1].state = QuantizationStates.SOI
continue
if operation.type == 'Clip':
# Inputs (1 - 3)
# input : T
# Input tensor whose elements to be clipped
# min (optional) : T
# Minimum value, under which element is replaced by min.
# It must be a scalar(tensor of empty shape).
# max (optional) : T
# Maximum value, above which element is replaced by max.
# It must be a scalar(tensor of empty shape).
# see also https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Clip-11
assert len(operation.config.input_quantization_config) in {1, 2, 3}, f'Clip Operation {operation.name} should have 1 - 3 inputs, '\
f'while {len(operation.config.input_quantization_config)} was given, is graph definition different from onnx opset 11?'
for config in operation.config.input_quantization_config[1: ]:
config.state = QuantizationStates.PASSIVE_INIT
continue
if operation.type == 'Pad':
# Inputs (2 - 3)
# data : T
# Input tensor.
# pads : tensor(int64)
# Tensor of integers indicating the number of padding elements to add or remove
# (if negative) at the beginning and end of each axis.
# For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * input_rank].
# `pads` format should be: [x1_begin, x2_begin,...,x1_end, x2_end,...],
# where xi_begin is the number of pad values added at the beginning of axis `i` and xi_end,
# the number of pad values added at the end of axis `i`.
# constant_value (optional) : T
# (Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0).
# https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Pad-11
assert len(operation.config.input_quantization_config) in {2, 3}, f'Pad Operation {operation.name} should have 2 - 3 inputs, '\
f'while {len(operation.config.input_quantization_config)} was given, is graph definition different from onnx opset 11?'
operation.config.input_quantization_config[1].state = QuantizationStates.SOI
if len(operation.config.input_quantization_config) == 3:
operation.config.input_quantization_config[-1].state = QuantizationStates.PASSIVE_INIT
continue
if operation.type == 'Resize':
# Inputs (3 - 4)
# X : T1
# N-D tensor
# roi : T2
# 1-D tensor given as [start1, ..., startN, end1, ..., endN],
# where N is the rank of X. The RoIs' coordinates are normalized in the coordinate system of the input image.
# It only takes effect when coordinate_transformation_mode is "tf_crop_and_resize"
# scales : tensor(float)
# The scale array along each dimension.
# It takes value greater than 0. If it's less than 1, it's sampling down,
# otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X'.
# Only one of 'scales' and 'sizes' can be specified.
# If 'size' is needed, the user can use an empty string as the name of 'scales' in this operator's input list.
# sizes (optional) : tensor(int64)
# The size of the output tensor.
# The number of elements of 'sizes' should be the same as the rank of input 'X'.
| |
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimmjm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vijpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vipjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vijmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vipjmm')
SETermsRef.append([ind1, ind2, -1, -1])
if SEmodel>=11:
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uippjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vippjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uippjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vippjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimmjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimmjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimmjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimmjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uippjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vippjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uippjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vippjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimmjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimmjpp')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimmjmm')
SETermsRef.append([ind1, ind2, -1, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimmjmm')
SETermsRef.append([ind1, ind2, -1, -1])
if SEmodel>=12:
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Uipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipj')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Uimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimj')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijp')
ind3 = ResolvedVarNames.index('Uijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vijp')
ind3 = ResolvedVarNames.index('Vijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijp')
ind3 = ResolvedVarNames.index('Vijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Uijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vijm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Uipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vipj')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Uimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimj')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijp')
ind3 = ResolvedVarNames.index('Uijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vijp')
ind3 = ResolvedVarNames.index('Vijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijp')
ind3 = ResolvedVarNames.index('Vijp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Uijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vijm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
if SEmodel>=13:
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Uipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjp')
ind3 = ResolvedVarNames.index('Vipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Vipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Uipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjm')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjp')
ind3 = ResolvedVarNames.index('Uimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimjp')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjp')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Uimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimjm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Uipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vipjp')
ind3 = ResolvedVarNames.index('Vipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Vipjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Uipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vipjm')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjp')
ind3 = ResolvedVarNames.index('Uimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimjp')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjp')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Uimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Vimjm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Vij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
if SEmodel>=14:
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Uipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjp')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjp')
ind3 = ResolvedVarNames.index('Vipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjp')
ind3 = ResolvedVarNames.index('Uipj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Uipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipj')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipj')
ind3 = ResolvedVarNames.index('Vipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipj')
ind3 = ResolvedVarNames.index('Uipjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Uijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uipjm')
ind3 = ResolvedVarNames.index('Vijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vipjm')
ind3 = ResolvedVarNames.index('Uijm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Uimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vijm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uijm')
ind3 = ResolvedVarNames.index('Vimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vijm')
ind3 = ResolvedVarNames.index('Uimjm')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Uimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimjm')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjm')
ind3 = ResolvedVarNames.index('Vimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimjm')
ind3 = ResolvedVarNames.index('Uimj')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Uimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimj')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimj')
ind3 = ResolvedVarNames.index('Vimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Vimj')
ind3 = ResolvedVarNames.index('Uimjp')
SETermsRef.append([ind1, ind2, ind3, -1])
ind1 = ResolvedVarNames.index('Uij')
ind2 = ResolvedVarNames.index('Uimjp')
ind3 = ResolvedVarNames.index('Uijp')
SETermsRef.append([ind1, ind2, ind3, -1])
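# Compact sketch of the same construction (illustrative only; the neighbour list below is just an
# example subset, not the exact set used by any particular SEmodel level):
#
#   centers = ['Uij', 'Vij']
#   neighbours = ['Uimjm', 'Vimjm', 'Uijm', 'Vijm']
#   for c in centers:
#       for n in neighbours:
#           # second-order terms: [center, neighbour, -1, -1]
#           SETermsRef.append([ResolvedVarNames.index(c), ResolvedVarNames.index(n), -1, -1])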
of the sound
device, and not the format you gave to `Mix_OpenAudio`, although they
may in reality be the same. This is an unfortunate but necessary speed
concern. Use `Mix_QuerySpec` to determine if you can handle the data
before you register your effect, and take appropriate actions.
You may also specify a callback (`d`) that is called when the channel
finishes playing. This gives you a more fine-grained control than
`Mix_ChannelFinished`, in case you need to free effect-specific
resources, etc. If you don't need this, you can specify None.
You may set the callbacks before or after calling `Mix_PlayChannel`.
Things like `Mix_SetPanning` are just internal special effect
functions, so if you are using that, you've already incurred the
overhead of a copy to a separate buffer, and that these effects will be
in the queue with any functions you've registered. The list of
registered effects for a channel is reset when a chunk finishes
playing, so you need to explicitly set them with each call to
``Mix_PlayChannel*``.
You may also register a special effect function that is to be run after
final mixing occurs. The rules for these callbacks are identical to
those in `Mix_RegisterEffect`, but they are run after all the channels
and the music have been mixed into a single stream, whereas
channel-specific effects run on a given channel before any other mixing
occurs. These global effect callbacks are called "posteffects".
Posteffects only have their `d` function called when they are
unregistered (since the main output stream is never "done" in the same
sense as a channel). You must unregister them manually when you've had
enough. Your callback will be told that the channel being mixed is
(`MIX_CHANNEL_POST`) if the processing is considered a posteffect.
After all these effects have finished processing, the callback
registered through `Mix_SetPostMix` runs, and then the stream goes to
the audio device.
Do not call `SDL_LockAudio` from your callback function.
:Parameters:
`chan` : int
Channel to set effect on, or `MIX_CHANNEL_POST` for postmix.
`f` : function
Callback function for effect. Must have the signature
(channel: int, stream: `SDL_array`, udata: any) -> None;
where channel is the channel being affected, stream contains
the audio data and udata is the user variable passed in to
this function.
`d` : function
Callback function for when the effect is done. The function
must have the signature (channel: int, udata: any) -> None.
`arg` : any
User data passed to both callbacks.
"""
f = _make_MixEffectFunc(f, arg)
d = _make_MixEffectDoneFunc(d, arg)
_effect_func_refs.append(f)
_effect_func_refs.append(d)
# TODO: override EffectDone callback to remove refs and prevent
# memory leak. Be careful with MIX_CHANNEL_POST
_Mix_RegisterEffect(chan, f, d, arg)
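# Usage sketch (illustrative; assumes the mixer has been opened with Mix_OpenAudio and that a
# chunk is already playing on `channel`). Callback signatures follow the docstring above.
#
#   def noop_effect(channel, stream, udata):
#       # inspect or modify `stream` (an SDL_array in the device's output format) in place;
#       # this example leaves the audio untouched
#       pass
#
#   def effect_done(channel, udata):
#       print('effect finished on channel', channel)
#
#   Mix_RegisterEffect(channel, noop_effect, effect_done, None)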
# Mix_UnregisterEffect cannot be implemented
Mix_UnregisterAllEffects = _dll.function(
'Mix_UnregisterAllEffects',
'''Unregister all effects for a channel.
You may not need to call this explicitly, unless you need to stop all
effects from processing in the middle of a chunk's playback. Note that
this will also shut off some internal effect processing, since
`Mix_SetPanning` and others may use this API under the hood. This is
called internally when a channel completes playback.
Posteffects are never implicitly unregistered as they are for channels,
but they may be explicitly unregistered through this function by
specifying `MIX_CHANNEL_POST` for a channel.
:Parameters:
- `channel`: int
''',
args=['channel'],
arg_types=[c_int],
return_type=c_int,
error_return=0)
Mix_SetPanning = _dll.function(
'Mix_SetPanning',
'''Set the panning of a channel.
The left and right channels are specified as integers between 0 and
255, quietest to loudest, respectively.
Technically, this is just individual volume control for a sample with
two (stereo) channels, so it can be used for more than just panning.
If you want real panning, call it like this::
Mix_SetPanning(channel, left, 255 - left)
Setting (channel) to `MIX_CHANNEL_POST` registers this as a posteffect, and
the panning will be done to the final mixed stream before passing it on
to the audio device.
This uses the `Mix_RegisterEffect` API internally, and returns without
registering the effect function if the audio device is not configured
for stereo output. Setting both (left) and (right) to 255 causes this
effect to be unregistered, since that is the data's normal state.
:Parameters:
- `channel`: int
- `left`: int
- `right`: int
''',
args=['channel', 'left', 'right'],
arg_types=[c_int, c_ubyte, c_ubyte],
return_type=c_int,
error_return=0)
Mix_SetPosition = _dll.function(
'Mix_SetPosition',
'''Set the position of a channel.
`angle` is an integer from 0 to 360, that specifies the location of the
sound in relation to the listener. `angle` will be reduced as necessary
(540 becomes 180 degrees, -100 becomes 260). Angle 0 is due north, and
rotates clockwise as the value increases. For efficiency, the
precision of this effect may be limited (angles 1 through 7 might all
produce the same effect, 8 through 15 are equal, etc). `distance` is
an integer between 0 and 255 that specifies the space between the sound
and the listener. The larger the number, the further away the sound is.
Using 255 does not guarantee that the channel will be culled from the
mixing process or be completely silent. For efficiency, the precision
of this effect may be limited (distance 0 through 5 might all produce
the same effect, 6 through 10 are equal, etc). Setting `angle` and
`distance` to 0 unregisters this effect, since the data would be
unchanged.
If you need more precise positional audio, consider using OpenAL for
spatialized effects instead of SDL_mixer. This is only meant to be a
basic effect for simple "3D" games.
If the audio device is configured for mono output, then you won't get
any effectiveness from the angle; however, distance attenuation on the
channel will still occur. While this effect will function with stereo
voices, it makes more sense to use voices with only one channel of
sound, so when they are mixed through this effect, the positioning will
sound correct. You can convert them to mono through SDL before giving
them to the mixer in the first place if you like.
Setting `channel` to `MIX_CHANNEL_POST` registers this as a posteffect,
and the positioning will be done to the final mixed stream before
passing it on to the audio device.
This is a convenience wrapper over `Mix_SetDistance` and
`Mix_SetPanning`.
:Parameters:
- `channel`: int
- `angle`: int
- `distance`: int
''',
args=['channel', 'angle', 'distance'],
arg_types=[c_int, c_short, c_ubyte],
return_type=c_int,
error_return=0)
Mix_SetDistance = _dll.function(
'Mix_SetDistance',
'''Set the "distance" of a channel.
`distance` is an integer from 0 to 255 that specifies the location of
the sound in relation to the listener. Distance 0 is overlapping the
listener, and 255 is as far away as possible. A distance of 255 does not
guarantee silence; in such a case, you might want to try changing the
chunk's volume, or just cull the sample from the mixing process with
`Mix_HaltChannel`.
For efficiency, the precision of this effect may be limited (distances
1 through 7 might all produce the same effect, 8 through 15 are equal,
etc). `distance` is an integer between 0 and 255 that specifies the
space between the sound and the listener. The larger the number, the
further away the sound is.
Setting `distance` to 0 unregisters this effect, since the data would
be unchanged.
If you need more precise positional audio, consider using OpenAL for
spatialized effects instead of SDL_mixer. This is only meant to be a
basic effect for simple "3D" games.
Setting `channel` to `MIX_CHANNEL_POST` registers this as a posteffect,
and the distance attenuation will be done to the final mixed stream
before passing it on to the audio device.
This uses the `Mix_RegisterEffect` API internally.
:Parameters:
- `channel`: int
- `distance`: int
''',
args=['channel', 'distance'],
arg_types=[c_int, c_ubyte],
return_type=c_int,
error_return=0)
Mix_SetReverseStereo = _dll.function(
'Mix_SetReverseStereo',
'''Causes a channel to reverse its stereo.
This is handy if the user has his or her speakers hooked up backwards,
or you would like to have a minor bit of psychedelia in your sound code.
command (%s for help, %s to quit): ' % (cyan('h'),
cyan('q'))
if num is not None:
choice = num
else:
choice = get_input(msg)
if isinstance(choice, int):
if 0 <= choice <= len(entries):
break
else:
if num is not None:
exit("Invalid number {}: must be between 0 and {}"
.format(num, len(entries) - 1))
else:
msg = "Invalid number: must be between 0 and %s"
print(msg % (len(entries) - 1))
elif choice == 'x':
if command is None:
print('No command has been set. Set command with `c`')
else:
return _run_ssh_command(entries, username, idfile,
command, tunnel)
elif choice == 'h':
_print_help = True
elif choice in ['q', 'quit', 'exit']:
print('bye!')
return
else:
# All of these commands take one or more arguments, so the
# split length must be at least 2.
commands = choice.split()
if len(commands) < 2:
print(yellow('Unknown command "%s".' % choice))
else:
cmd = commands[0]
if cmd in ['u', 'i', 'p']:
if cmd == 'u':
username = commands[1]
elif cmd == 'i':
_idfile = commands[1]
if not os.path.exists(_idfile):
print(yellow('No such file: %s' % _idfile))
continue
idfile = _idfile
elif cmd == 'p':
p = commands[1]
try:
profile = LsiProfile.load(p)
_username = profile.username
_idfile = expanduser(profile.identity_file)
except LsiProfile.LoadError:
print(yellow('No such profile: %s' % repr(p)))
continue
username = _username
idfile = _idfile
print('username: %s' % green(repr(username)))
print('identity file: %s' % green(repr(idfile)))
elif cmd == 'f':
entries = filter_entries(entries, commands[1:], [])
_print_entries = True
elif cmd == 'e':
entries = filter_entries(entries, [], commands[1:])
_print_entries = True
elif cmd == 'c':
command = ' '.join(commands[1:])
elif cmd == 'limit':
try:
limit = int(commands[1])
_print_entries = True
except ValueError:
print(yellow('Invalid limit (must be an integer)'))
elif cmd == 'sort':
sort_by = commands[1]
if sort_by not in show:
show.append(sort_by)
_print_entries = True
elif cmd == 'show':
if show is None:
show = commands[1:]
else:
show.extend(commands[1:])
_print_entries = True
else:
print(yellow('Unknown command "%s".' % cmd))
return _connect_ssh(entries[choice], username, idfile, tunnel)
def _get_path(cmd):
"""Queries bash to find the path to a commmand on the system."""
if cmd in _PATHS:
return _PATHS[cmd]
out = subprocess.check_output('which {}'.format(cmd), shell=True)
_PATHS[cmd] = out.decode("utf-8").strip()
return _PATHS[cmd]
def _build_ssh_command(hostname, username, idfile, ssh_command, tunnel):
"""Uses hostname and other info to construct an SSH command."""
command = [_get_path('ssh'),
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=5']
if idfile is not None:
command.extend(['-i', idfile])
if tunnel is not None:
# If there's a tunnel, run the ssh command on the tunneled host.
command.extend(['-A', '-t', tunnel, 'ssh', '-A', '-t'])
if username is not None:
command.append('{}@{}'.format(username, hostname))
else:
command.append(hostname)
if ssh_command is not None:
command.append(repr(ssh_command))
return(' '.join(command))
def _build_scp_command(hostname, username, idfile, is_get,
local_path, remote_path):
"""
Uses hostname and other info to construct an SCP command.
:param hostname: The hostname of the remote machine.
:type hostname: ``str``
:param username: The username to use on the remote machine.
:type username: ``str``
:param idfile: A path to the identity file to use.
:type idfile: ``str``
:param is_get: If true, we are getting a file rather than putting a file.
:type is_get: ``bool``
:param local_path: The path on the local file system.
:type local_path: ``str``
:param remote_path: The path on the remote file system.
:type remote_path: ``str``
"""
if hostname is None or hostname.strip() == '':  # check None before calling .strip()
raise ValueError('Empty hostname')
command = [_get_path('scp'),
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=5',
'-o', 'UserKnownHostsFile={}'.format(_KNOWN_HOSTS_FILE)]
if idfile is not None:
command.extend(['-i', idfile])
if username is not None:
hostname = '%s@%s' % (username, hostname)
remote_path = '{}:{}'.format(hostname, remote_path)
if is_get:
command.extend([remote_path, local_path])
else:
command.extend([local_path, remote_path])
return ' '.join(command)
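# Example of the command this builds (illustrative; host, user and paths are placeholders):
#
#   _build_scp_command('app01.example.com', 'deploy', '~/.ssh/id_rsa', is_get=True,
#                      local_path='./app01.log', remote_path='/var/log/app.log')
#   # -> scp -o StrictHostKeyChecking=no -o ConnectTimeout=5
#   #        -o UserKnownHostsFile=<_KNOWN_HOSTS_FILE> -i ~/.ssh/id_rsa
#   #        deploy@app01.example.com:/var/log/app.log ./app01.log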
def _copy_to(entries, remote_path, local_path, profile):
"""
Performs an SCP command where the remote_path is the target and the
local_path is the source.
:param entries: A list of entries.
:type entries: ``list`` of :py:class:`HostEntry`
:param remote_path: The target path on the remote machine(s).
:type remote_path: ``str``
:param local_path: The source path on the local machine.
:type local_path: ``str``
:param profile: The profile, holding username/idfile info, etc.
:type profile: :py:class:`Profile`
"""
commands = []
for entry in entries:
hname = entry.hostname or entry.public_ip
cmd = _build_scp_command(hname, profile.username,
profile.identity_file,
is_get=False,
local_path=local_path,
remote_path=remote_path)
print('Command:', cmd)
commands.append({
'command': cmd,
'description': entry.display()
})
stream_commands(commands)
print(green('Finished copying'))
def _copy_from(entries, remote_path, local_path, profile):
"""
Performs an SCP command where the remote_path is the source and the
local_path is a format string, formatted individually for each host
being copied from so as to create one or more distinct paths on the
local system.
:param entries: A list of entries.
:type entries: ``list`` of :py:class:`HostEntry`
:param remote_path: The source path on the remote machine(s).
:type remote_path: ``str``
:param local_path: A format string for the path on the local machine.
:type local_path: ``str``
:param profile: The profile, holding username/idfile info, etc.
:type profile: :py:class:`Profile`
"""
commands = []
paths = set()
for entry in entries:
hname = entry.hostname or entry.public_ip
_local_path = entry.format_string(local_path)
if _local_path in paths:
raise ValueError('Duplicate local paths: one or more paths '
'had value {} after formatting.'
.format(local_path))
paths.add(_local_path)
# If the path references a folder, create the folder if it doesn't
# exist.
_folder = os.path.split(_local_path)[0]
if len(_folder) > 0:
if not os.path.exists(_folder):
print('Creating directory ' + _folder)
os.makedirs(_folder)
cmd = _build_scp_command(hname, profile.username,
profile.identity_file,
is_get=True,
local_path=_local_path,
remote_path=remote_path)
print('Command:', cmd)
commands.append({
'command': cmd,
'description': entry.display()
})
stream_commands(commands)
print(green('Finished copying'))
def _run_ssh_command(entries, username, idfile, command, tunnel,
parallel=False):
"""
Runs the given command over SSH in parallel on all hosts in `entries`.
:param entries: The host entries to pull the hostnames from.
:type entries: ``list`` of :py:class:`HostEntry`
:param username: To use a specific username.
:type username: ``str`` or ``NoneType``
:param idfile: The SSH identity file to use, or none.
:type idfile: ``str`` or ``NoneType``
:param command: The command to run.
:type command: ``str``
:param parallel: If true, commands will be run in parallel.
:type parallel: ``bool``
"""
if len(entries) == 0:
print('(No hosts to run command on)')
return 1
if command is None or command.strip() == '':  # check None before calling .strip()
raise ValueError('No command given')
print('Running command {0} on {1} matching hosts'
.format(green(repr(command)), len(entries)))
shell_cmds = []
for entry in entries:
hname = entry.hostname or entry.public_ip
cmd = _build_ssh_command(hname, username, idfile, command, tunnel)
shell_cmds.append({
'command': cmd,
'description': entry.display()
})
stream_commands(shell_cmds, parallel=parallel)
print(green('All commands finished'))
def _connect_ssh(entry, username, idfile, tunnel=None):
"""
SSH into to a host.
:param entry: The host entry to pull the hostname from.
:type entry: :py:class:`HostEntry`
:param username: To use a specific username.
:type username: ``str`` or ``NoneType``
:param idfile: The SSH identity file to use, if supplying a username.
:type idfile: ``str`` or ``NoneType``
:param tunnel: Host to tunnel SSH command through.
:type tunnel: ``str`` or ``NoneType``
:return: An exit status code.
:rtype: ``int``
"""
if entry.hostname != "" and entry.hostname is not None:
_host = entry.hostname
elif entry.public_ip != "" and entry.public_ip is not None:
_host = entry.public_ip
elif entry.private_ip != "" and entry.private_ip is not None:
if tunnel is None:
raise ValueError("Entry does not have a hostname or public IP. "
"You can connect via private IP if you use a "
"tunnel.")
_host = entry.private_ip
else:
raise ValueError("No hostname, public IP or private IP information "
"found on host entry. I don't know how to connect.")
command = _build_ssh_command(_host, username, idfile, None, tunnel)
print('Connecting to %s...' % cyan(entry.display()))
print('SSH command: %s' % green(command))
proc = subprocess.Popen(command, shell=True)
return proc.wait()
def _print_version():
"""Print the version and exit."""
from __init__ import __version__
print(__version__)
sys.exit(0)
def _get_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description='List EC2 instances')
parser.add_argument('-l', '--latest', action='store_true', default=False,
help='Query AWS for latest instances')
parser.add_argument('--version', action='store_true', default=False,
help='Print version and exit')
parser.add_argument('--refresh-only', action='store_true', default=False,
help='Refresh cache and exit')
parser.add_argument('--host', help='Specific host to list',
default=None)
parser.add_argument('-s', '--ssh', action='store_true',
help='SSH to instance', default=False)
    parser.add_argument('-i', '--identity-file', help='SSH identity file',
default=None)
parser.add_argument('-u', '--username', default=None,
help='Log in as this user')
parser.add_argument('filters', nargs='*',
help='Text filters for output lines')
parser.add_argument('-v', '--exclude', nargs='+',
help='Exclude results that match these')
parser.add_argument('-c', '--command', type=str,
help='Command to run on matching instance(s)')
parser.add_argument('-y', '--no-prompt', action='store_true', default=False,
help="Don't ask for confirmation before running a "
"command")
parser.add_argument('-p', '--profile', type=str,
help='Profile to use (defined | |
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import copy
import torch
from torch import nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from .backbones.senet import SENet, SEResNetBottleneck, SEBottleneck, SEResNeXtBottleneck
from .backbones.resnet_ibn_a import resnet50_ibn_a
from .backbones.twins import twins_svt_small, SELayer
from timm.models.resnet import Bottleneck as TimmBottleneck, tv_resnet50  # alias avoids shadowing the project-local Bottleneck imported above
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
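# Added illustration (not in the original file): the two init helpers above are
# intended to be applied module-wise via nn.Module.apply; the layer sizes and
# class count below are placeholders, not values used by this model.
def _demo_weight_init():
    head = nn.Sequential(nn.Linear(2048, 512), nn.BatchNorm1d(512))
    head.apply(weights_init_kaiming)
    classifier = nn.Linear(512, 751)
    classifier.apply(weights_init_classifier)
    return head, classifier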
class Baseline(nn.Module):
in_planes = 2048
def __init__(self, num_classes, last_stride, model_path, neck, neck_feat, model_name, pretrain_choice):
super(Baseline, self).__init__()
if model_name == 'resnet18':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock,
layers=[2, 2, 2, 2])
elif model_name == 'resnet34':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock,
layers=[3, 4, 6, 3])
elif model_name == 'resnet50':
self.base = ResNet(last_stride=last_stride,
block=Bottleneck,
layers=[3, 4, 6, 3])
elif model_name == 'resnet101':
self.base = ResNet(last_stride=last_stride,
block=Bottleneck,
layers=[3, 4, 23, 3])
elif model_name == 'resnet152':
self.base = ResNet(last_stride=last_stride,
block=Bottleneck,
layers=[3, 8, 36, 3])
elif model_name == 'se_resnet50':
self.base = SENet(block=SEResNetBottleneck,
layers=[3, 4, 6, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=last_stride)
elif model_name == 'se_resnet101':
self.base = SENet(block=SEResNetBottleneck,
layers=[3, 4, 23, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=last_stride)
elif model_name == 'se_resnet152':
self.base = SENet(block=SEResNetBottleneck,
layers=[3, 8, 36, 3],
groups=1,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=last_stride)
elif model_name == 'se_resnext50':
self.base = SENet(block=SEResNeXtBottleneck,
layers=[3, 4, 6, 3],
groups=32,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=last_stride)
elif model_name == 'se_resnext101':
self.base = SENet(block=SEResNeXtBottleneck,
layers=[3, 4, 23, 3],
groups=32,
reduction=16,
dropout_p=None,
inplanes=64,
input_3x3=False,
downsample_kernel_size=1,
downsample_padding=0,
last_stride=last_stride)
elif model_name == 'senet154':
self.base = SENet(block=SEBottleneck,
layers=[3, 8, 36, 3],
groups=64,
reduction=16,
dropout_p=0.2,
last_stride=last_stride)
elif model_name == 'resnet50_ibn_a':
self.base = resnet50_ibn_a(last_stride)
elif model_name == 'twins_svt_small':
self.base = twins_svt_small(pretrained=True)
self.in_planes = 512
# resnet = tv_resnet50(pretrained=True)
# resnetPost1 = resnet.layer3
# resnetPost2 = resnet.layer4
# resnetPost1.load_state_dict(resnet.layer3.state_dict())
# resnetPost2.load_state_dict(resnet.layer4.state_dict())
# self.globalBranch = copy.deepcopy(resnetPost1)
# self.localBranch = nn.Sequential(copy.deepcopy(resnetPost1),
# copy.deepcopy(resnetPost2))
if pretrain_choice == 'imagenet' and model_name != 'twins_svt_small':
self.base.load_param(model_path)
print('Loading pretrained ImageNet model......')
self.avggap = nn.AdaptiveAvgPool2d(1)
self.maxgap = nn.AdaptiveMaxPool2d(1)
# self.gap = nn.AdaptiveMaxPool2d(1)
# self.gap = nn.AdaptiveAvgPool2d(1)
# self.f_1_avggap = nn.AdaptiveAvgPool2d(1)
# self.f_1_maxgap = nn.AdaptiveMaxPool2d(1)
# self.f_2_avggap = nn.AdaptiveAvgPool2d(1)
# self.f_2_maxgap = nn.AdaptiveMaxPool2d(1)
# self.f_3_avggap = nn.AdaptiveAvgPool2d(1)
# self.f_3_maxgap = nn.AdaptiveMaxPool2d(1)
# self.f_4_avggap = nn.AdaptiveAvgPool2d(1)
# self.f_4_maxgap = nn.AdaptiveMaxPool2d(1)
# reductiong = nn.Sequential(nn.Conv2d(2048, 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU())
# reductionl = nn.Sequential(nn.Conv2d(2048, 1024, 1, bias=False), nn.BatchNorm2d(1024), nn.ReLU())
#
# # self._init_reduction(reductiong)
# self._init_reduction(reductionl)
# # reductiong.apply(_init_reduction)
# # reductionl.apply(_init_reduction)
# # self._init_reduction(reduction)
# # self.reduction_0 = copy.deepcopy(reductiong)
# self.reduction_1 = copy.deepcopy(reductionl)
# self.reduction_2 = copy.deepcopy(reductionl)
# self.reduction_3 = copy.deepcopy(reductionl)
# self.reduction_4 = copy.deepcopy(reductionl)
# se = SELayer(channel=512)
self.b1 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=1),
nn.BatchNorm2d(512),
# se,
nn.ReLU()
)
self._init_reduction(self.b1)
# self.b2 = nn.Sequential(
# nn.Conv2d(256, 512, kernel_size=3, stride=2),
# nn.BatchNorm2d(512),
# # se,
# nn.ReLU()
# )
# self._init_reduction(self.b2)
# self.b3 = nn.Sequential(
# nn.Conv2d(512, 256, kernel_size=1, stride=1),
# nn.BatchNorm2d(256),
# # se,
# nn.ReLU()
# )
# self._init_reduction(self.b3)
# self.b4 = nn.Sequential(
# nn.Conv2d(64, 128, kernel_size=3, stride=2),
# nn.BatchNorm2d(128),
# # se,
# nn.ReLU()
# )
self.num_classes = num_classes
self.neck = neck
self.neck_feat = neck_feat
if self.neck == 'no':
self.in_planes = self.in_planes
self.classifier = nn.Linear(self.in_planes, self.num_classes)
# self.classifier_f1 = nn.Linear(self.in_planes, self.num_classes)
# self.classifier_f2 = nn.Linear(self.in_planes, self.num_classes)
# self.classifier_f3 = nn.Linear(self.in_planes, self.num_classes)
# self.classifier_f4 = nn.Linear(self.in_planes, self.num_classes)
# self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) # new add by luo
# self.classifier.apply(weights_init_classifier) # new add by luo
elif self.neck == 'bnneck':
# self.in_planes = self.in_planes//8
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier.apply(weights_init_classifier)
self.bottleneck_f1 = nn.BatchNorm1d(self.in_planes)
self.bottleneck_f1.bias.requires_grad_(False) # no shift
self.classifier_f1 = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.bottleneck_f1.apply(weights_init_kaiming)
self.classifier_f1.apply(weights_init_classifier)
self.bottleneck_f2 = nn.BatchNorm1d(self.in_planes)
self.bottleneck_f2.bias.requires_grad_(False) # no shift
self.classifier_f2 = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.bottleneck_f2.apply(weights_init_kaiming)
self.classifier_f2.apply(weights_init_classifier)
# #
# self.bottleneck_f3 = nn.BatchNorm1d(self.in_planes)
# self.bottleneck_f3.bias.requires_grad_(False) # no shift
# self.classifier_f3 = nn.Linear(self.in_planes, self.num_classes, bias=False)
#
# self.bottleneck_f3.apply(weights_init_kaiming)
# self.classifier_f3.apply(weights_init_classifier)
#
# self.bottleneck_f4 = nn.BatchNorm1d(self.in_planes)
# self.bottleneck_f4.bias.requires_grad_(False) # no shift
# self.classifier_f4 = nn.Linear(self.in_planes, self.num_classes, bias=False)
#
# self.bottleneck_f4.apply(weights_init_kaiming)
# self.classifier_f4.apply(weights_init_classifier)
##single
# def forward(self, x):
# feat_map = self.base(x)
# # print(feat_map.shape)
# # global_feat = self.gap(feat_map) # (b, 2048, 1, 1)
# # global_feat = global_feat.view(global_feat.shape[0], -1) # flatten to (bs, 2048)
# global_feat = feat_map.view(feat_map.shape[0], -1) # flatten to (bs, 2048)
#
# if self.neck == 'no':
# feat = global_feat
# elif self.neck == 'bnneck':
# feat = self.bottleneck(global_feat) # normalize for angular softmax
#
# if self.training:
# cls_score = self.classifier(feat)
#
# return cls_score, global_feat # global feature for triplet loss
# else:
# if self.neck_feat == 'after':
# # print("Test with feature after BN")
# return feat
# else:
# # print("Test with feature before BN")
# return global_feat
@staticmethod
def _init_reduction(reduction):
# conv
nn.init.kaiming_normal_(reduction[0].weight, mode='fan_in')
# nn.init.constant_(reduction[0].bias, 0.)
# bn
nn.init.normal_(reduction[1].weight, mean=1., std=0.02)
nn.init.constant_(reduction[1].bias, 0.)
# ##multi
# def forward(self, x):
# feat_map = self.base(x) ###b,512,7,7
# global_feat = self.maxgap(feat_map).squeeze(dim=3).squeeze(dim=2) ##b,1024
# # global_feat = self.reduction_0(global_feat)
#
#
# ###local branch
# local_f1_map = feat_map[:,:,0:3,:] ##b,512,1,2
# local_f2_map = feat_map[:,:,2:5,:] ##b,512,1,2
# local_f3_map = feat_map[:,:,4:7,:] ##b,512,1,2
#
#
# local_f1_final = self.avggap(local_f1_map).squeeze(dim=3).squeeze(dim=2) ## b,512
# local_f2_final = self.avggap(local_f2_map).squeeze(dim=3).squeeze(dim=2)
# local_f3_final = self.avggap(local_f3_map).squeeze(dim=3).squeeze(dim=2)
#
#
# if self.neck == 'no':
# feat = global_feat
# feat_f1 = local_f1_final
# feat_f2 = local_f2_final
# feat_f3 = local_f3_final
#
#
# elif self.neck == 'bnneck':
# feat = self.bottleneck(global_feat) # normalize for angular softmax
#
# feat_f1 = self.bottleneck_f1(local_f1_final)
# feat_f2 = self.bottleneck_f2(local_f2_final)
# feat_f3 = self.bottleneck_f3(local_f3_final)
#
#
# if self.training:
# cls_score = self.classifier(feat)
# cls_f1 = self.classifier_f1(feat_f1)
# cls_f2 = self.classifier_f2(feat_f2)
# cls_f3 = self.classifier_f3(feat_f3)
#
#
# # return cls_score, global_feat # global feature for triplet loss
# return [cls_score,cls_f1,cls_f2,cls_f3], [global_feat,local_f1_final,local_f2_final,local_f3_final] # global feature for triplet loss
# else:
# if self.neck_feat == 'after':
# # print("Test with feature after BN")
# final_feat = torch.cat([feat,feat_f1,feat_f2,feat_f3],dim=1) #b,3072
# return final_feat
# else:
# # print("Test with feature before BN")
# final_feat = torch.cat([global_feat,local_f1_final,local_f2_final,local_f3_final],dim=1)
# return final_feat
# #multi block3
# def forward(self, x):
# feat_mapBlock3 , feat_map = self.base(x) ###b,512,7,7
# global_feat = self.avggap(feat_map).squeeze(dim=3).squeeze(dim=2) ##b,1024
#
#
# ###local branch
# local_f1_map = feat_mapBlock3[:, 0:64, :, :] ##b,64,14,14
# local_f2_map = feat_mapBlock3[:, 64:128, :, :] ##b,64,14,14
# local_f3_map = feat_mapBlock3[:, 128:192, :, :] ##b,64,14,14
# local_f4_map = feat_mapBlock3[:, 192:256, :, :] ##b,64,14,14
#
# local_f1_final = self.avggap(self.b1(local_f1_map)).squeeze(dim=3).squeeze(dim=2) ## b,512
# local_f2_final = self.avggap(self.b2(local_f2_map)).squeeze(dim=3).squeeze(dim=2)
# local_f3_final = self.avggap(self.b3(local_f3_map)).squeeze(dim=3).squeeze(dim=2)
# local_f4_final = self.avggap(self.b4(local_f4_map)).squeeze(dim=3).squeeze(dim=2)
#
# feat_local_final = torch.cat([local_f1_final, local_f2_final, local_f3_final, local_f4_final], dim=1)
#
# if self.neck == 'no':
# feat = global_feat
# feat_local = feat_local_final
#
# elif self.neck == 'bnneck':
# feat = self.bottleneck(global_feat) # normalize for angular softmax
# feat_local = self.bottleneck_f1(feat_local_final)
#
# if self.training:
# cls_score = self.classifier(feat)
# cls_f1 = self.classifier_f1(feat_local)
#
# # return cls_score, global_feat # global feature for triplet loss
# return [cls_score, cls_f1], [global_feat, feat_local_final] # global feature for triplet loss
# else:
# if self.neck_feat == 'after':
# # print("Test with feature after BN")
# final_feat = torch.cat([feat, feat_local], dim=1) # b,3072
# return final_feat
# else:
# # print("Test with feature before BN")
# final_feat = torch.cat([global_feat, feat_local], dim=1)
# return final_feat
# multi block3 block2
def forward(self, x):
feat_mapBlock3, feat_map = self.base(x) ###b,512,7,7
global_feat = self.avggap(feat_map).squeeze(dim=3).squeeze(dim=2) ##b,512
global_feat_blk3 = self.b1(feat_mapBlock3) ## b,512,12,12
###local branch
local_f1_map = global_feat_blk3[:, :, 0:6, :] ##b,512,6,12
local_f2_map = global_feat_blk3[:, :, 6:12, :] ##b,512,6,12
# local_f1_final = self.avggap(self.b1(local_f1_map)).squeeze(dim=3).squeeze(dim=2) ## b,512
# local_f2_final = self.avggap(self.b2(local_f2_map)).squeeze(dim=3).squeeze(dim=2)
local_f1_final = self.avggap(local_f1_map).squeeze(dim=3).squeeze(dim=2) ## b,512
local_f2_final = self.avggap(local_f2_map).squeeze(dim=3).squeeze(dim=2)
# feat_local_final = torch.cat([local_f1_final,local_f2_final],dim=1)
if self.neck == 'no':
feat = global_feat
# feat_local = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-08-20
"""Step_refine_pipline_final.py
:description: basic checks: mass balance, annotation, notes
:param :
:returns:
:rtype:
"""
import os
import pickle
import re
import cobra
import pandas as pd
from Bio import SeqIO
import My_def
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
# %% os.chdir('ComplementaryData/Step_03_Compare_Refine/')
print('----- loading data -----')
Lreu_draft_3_refined = cobra.io.load_json_model('Lreu_draft_3_refined_part03.json')
Lreuteri_530 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/Lreuteri_530_standlized.json')
iNF517 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/iNF517_standlized.json')
iML1515 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/iML1515_standlized.json')
# %% <general step: mass balance>
iML1515_mets = set([i.id for i in iML1515.metabolites])
iNF517_mets = set([i.id for i in iNF517.metabolites])
Lreuteri_530_mets = set([i.id for i in Lreuteri_530.metabolites])
iML1515_reas = set([i.id for i in iML1515.reactions])
iNF517_reas = set([i.id for i in iNF517.reactions])
Lreuteri_530_reas = set([i.id for i in Lreuteri_530.reactions])
iML1515_reas_balanced = set([i.id for i in iML1515.reactions if i.check_mass_balance() == {}])
iNF517_reas_balanced = set([i.id for i in iNF517.reactions if i.check_mass_balance() == {}])
Lreuteri_530_reas_balanced = set([i.id for i in Lreuteri_530.reactions if i.check_mass_balance() == {}])
Lreu_draft_3_refined_reas = set([i.id for i in Lreu_draft_3_refined.reactions])
Lreu_draft_3_refined_reas_balanced = set([i.id for i in Lreu_draft_3_refined.reactions if i.check_mass_balance() == {}])
print('unbalanced reactions number:\t', len(Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced))
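# Note added for clarity: cobra's Reaction.check_mass_balance() returns an empty
# dict for a balanced reaction and otherwise a dict of elemental/charge
# imbalances (e.g. {'charge': -1.0, 'H': -1.0}), which is why {} is used as the
# "balanced" test in the set comprehensions above.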
# <metabolites>
for met_i in Lreu_draft_3_refined.metabolites:
if met_i.id in iML1515_mets:
met_temp = iML1515.metabolites.get_by_id(met_i.id)
elif met_i.id in Lreuteri_530_mets:
met_temp = Lreuteri_530.metabolites.get_by_id(met_i.id)
elif met_i.id in iNF517_mets:
met_temp = iNF517.metabolites.get_by_id(met_i.id)
else:
# print(met_i)
continue
if met_i.formula != met_temp.formula:
# print(met_i.id, met_i.formula, met_temp.formula)
if met_temp.formula not in ['X', 'R', '']:
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula = met_temp.formula
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).charge = met_temp.charge
elif met_i.charge != met_temp.charge:
# print(met_i.id, met_i.charge, met_temp.charge)
if met_temp.formula not in ['X', 'R', '']:
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).charge = met_temp.charge
Lreu_draft_3_refined_reas_balanced = set([i.id for i in Lreu_draft_3_refined.reactions if i.check_mass_balance() == {}])
print('unbalanced reactions number:\t', len(Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced))
# <reactions>
for rea_i in (Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced) & Lreuteri_530_reas_balanced:
if Lreu_draft_3_refined.reactions.get_by_id(rea_i).reaction != Lreuteri_530.reactions.get_by_id(rea_i).reaction:
# print(Lreu_draft_3_refined.reactions.get_by_id(rea_i))
# print(Lreuteri_530.reactions.get_by_id(rea_i))
if '_LRE' not in Lreu_draft_3_refined.reactions.get_by_id(rea_i).reaction:
pass
# Lreu_draft_3_refined.reactions.get_by_id(rea_i).reaction = Lreuteri_530.reactions.get_by_id(rea_i).reaction
for met_i in Lreu_draft_3_refined.reactions.get_by_id(rea_i).metabolites.keys():
            if Lreuteri_530.metabolites.get_by_id(met_i.id).formula not in ['X', 'R', '']:
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula = Lreuteri_530.metabolites.get_by_id(
met_i.id).formula
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).charge = Lreuteri_530.metabolites.get_by_id(
met_i.id).charge
Lreu_draft_3_refined_reas_balanced = set([i.id for i in Lreu_draft_3_refined.reactions if i.check_mass_balance() == {}])
print('unbalanced reactions number:\t', len(Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced))
for rea_i in (Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced) & iML1515_reas_balanced:
if Lreu_draft_3_refined.reactions.get_by_id(rea_i).reaction != iML1515.reactions.get_by_id(rea_i).reaction:
# print(Lreu_draft_3_refined.reactions.get_by_id(rea_i))
# print(iML1515.reactions.get_by_id(rea_i))
# if rea_i not in ['MPTS']:
Lreu_draft_3_refined.reactions.get_by_id(rea_i).reaction = iML1515.reactions.get_by_id(rea_i).reaction
for met_i in Lreu_draft_3_refined.reactions.get_by_id(rea_i).metabolites.keys():
try:
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula = iML1515.metabolites.get_by_id(
met_i.id).formula
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).charge = iML1515.metabolites.get_by_id(met_i.id).charge
except:
pass
Lreu_draft_3_refined_reas_balanced = set([i.id for i in Lreu_draft_3_refined.reactions if i.check_mass_balance() == {}])
print('unbalanced reactions number:\t', len(Lreu_draft_3_refined_reas - Lreu_draft_3_refined_reas_balanced))
# <hand check> manual check
for met_i in Lreu_draft_3_refined.metabolites: # replace ACP:'X' to 'C11H21N2O7PRS'
if 'acp_c' in met_i.id:
# try:
# met_i.id = met_i.id.replace('acp_c','ACP_c')
# except:
# new_id = met_i.id.replace('acp_c','ACP_c')
# Lreu_draft_3_refined = My_def.merge_model.merge_reactionsid(Lreu_draft_3_refined, new_id, met_i.id)
formula = Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula = formula.replace('X', 'C11H21N2O7PRS')
elif 'ACP_c' in met_i.id:
formula = Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).formula = formula.replace('X', 'C11H21N2O7PRS')
Lreu_draft_3_refined.reactions.get_by_id('FMETTRS').reaction = Lreuteri_530.reactions.get_by_id('FMETTRS').reaction
Lreu_draft_3_refined.reactions.get_by_id(
'PROTRS').reaction = 'atp_c + pro__L_c + trnapro_c --> amp_c + ppi_c + protrna_c'
refine_dic = {
"2h3mp_c": ['C6H11O3', -1],
"2h3mp_e": ['C6H11O3', -1],
"2h3mb_c": ['C5H9O3', -1],
"dimp_c": ['C10H12N4O7P', -1],
"fol_e": ['C19H17N7O6', -2],
"MGD_c": ['C20H20N10O14P2S2Mo', -2],
"ppoh_e": ['C3H8O', 0],
"PreZ_c": ['C10H11N5O7P', -1],
"RTAala_c": ['C219H433N27O218P27', 25],
"PGlac2_c": ['C40H62N7O22', -1],
"glutrnagln_c": ['C5H7NO3R', 0],
"pa_LRE_c": ['C37.36H67.59O8P', -2],
"RNA_LRE_c": ['C9.52H9.91N3.75O6.58P1', -1],
"PROT_LRE_c": ['C4.91H8.91N1.42O1.5S0.37', 1],
"CPS_LRE_c": ['C24H47O27P', -2],
"DNA_LRE_c": ['C9.81H11.31N3.69O6P', -1],
"LIP_LRE_c": ['C41.68H77.65N0.46O10.17P', -1],
"LTAtotal_LRE_c": ['C162.46H312.39N15.3O136.3P20', -5],
}
for met_i, formula in refine_dic.items():
Lreu_draft_3_refined.metabolites.get_by_id(met_i).formula = formula[0]
Lreu_draft_3_refined.metabolites.get_by_id(met_i).charge = formula[1]
Lreu_draft_3_refined_reas_unbalanced = set(
[i.id for i in Lreu_draft_3_refined.reactions if (i.check_mass_balance() != {}) and ("EX" not in i.id)])
print('unbalanced reactions number:\t', len(Lreu_draft_3_refined_reas_unbalanced))
# removed_set = set(['PUACGAMS','HDMAT7','MCMAT8','GLUTRS2','ACKVAL','PTAVAL','ACKILE','PTAILE','SBTD','SBTD_L_1','NTPP10','ACKLEU','PTALEU','PYNP4'])
#
# for rea_i in list(Lreu_draft_3_refined_reas_unbalanced - removed_set):
#
# rea = Lreu_draft_3_refined.reactions.get_by_id(rea_i)
# # if '_LRE' in rea.reaction:
# # continue
# if rea.check_mass_balance() !={}:
# print('\n', rea)
# print(rea.check_mass_balance())
Lreu_draft_3_refined.objective = "BIOMASS"
Lreu_draft_3_refined.optimize()
print('Lreu_draft_3_refined Biomass_LRE:', Lreu_draft_3_refined.optimize())
# %% <general step: metabolites and reactions annotation>
bigg = pickle.load(open('../bigg_database/universal_model.pickle', 'rb'))
bigg_mets = set([i.id for i in bigg.metabolites])
bigg_reas = set([i.id for i in bigg.reactions])
# Lreu_draft_3_refined = cobra.io.load_json_model('Lreu_draft_3_refined_part03.json')
for met_i in Lreu_draft_3_refined.metabolites:
annotation_i = met_i.annotation
if met_i.id in iML1515_mets:
annotation_i = My_def.model_refine.merge_annotation(annotation_i,
iML1515.metabolites.get_by_id(met_i.id).annotation)
if met_i.id in Lreuteri_530_mets:
annotation_i = My_def.model_refine.merge_annotation(annotation_i,
Lreuteri_530.metabolites.get_by_id(met_i.id).annotation)
if met_i.id in iNF517_mets:
annotation_i = My_def.model_refine.merge_annotation(annotation_i,
iNF517.metabolites.get_by_id(met_i.id).annotation)
if met_i.id in bigg_mets:
annotation_temp = bigg.metabolites.get_by_id(met_i.id).annotation
annotation_temp = My_def.model_refine.convert_annotation(annotation_temp)
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if 'bigg.metabolite' not in annotation_i.keys():
annotation_temp = {'bigg.metabolite': met_i.id.split('_')[0]}
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if 'kegg.compound' in annotation_i.keys():
annotation_temp = annotation_i['kegg.compound']
if type(annotation_temp) == list:
annotation_i['kegg.compound'] = [i for i in annotation_temp if i.startswith('C')]
if 'sbo' not in annotation_i:
if '_LRE' in met_i.id:
# pass
annotation_temp = {'sbo': ['SBO:0000649']} # Biomass
else:
annotation_temp = {'sbo': ['SBO:0000247']} # Simple chemical
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
Lreu_draft_3_refined.metabolites.get_by_id(met_i.id).annotation = annotation_i
for rea_i in Lreu_draft_3_refined.reactions:
annotation_i = rea_i.annotation
if rea_i.id in iML1515_reas:
annotation_temp = iML1515.reactions.get_by_id(rea_i.id).annotation
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if rea_i.id in Lreuteri_530_reas:
annotation_temp = Lreuteri_530.reactions.get_by_id(rea_i.id).annotation
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if rea_i.id in iNF517_reas:
annotation_temp = iNF517.reactions.get_by_id(rea_i.id).annotation
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if rea_i.id in bigg_reas:
annotation_temp = bigg.reactions.get_by_id(rea_i.id).annotation
annotation_temp = My_def.model_refine.convert_annotation(annotation_temp)
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
if 'bigg.reaction' not in annotation_i.keys():
annotation_i = My_def.model_refine.merge_annotation(annotation_i, {'bigg.reaction': rea_i.id})
if 'rhea' in annotation_i.keys():
annotation_temp = annotation_i['rhea']
if type(annotation_temp) == list:
annotation_i['rhea'] = [i.split('#')[0] for i in annotation_temp]
# if 'sbo' not in annotation_i:
if rea_i.id.startswith('EX'):
annotation_temp = {'sbo': ['SBO:0000627']} # Exchange reactions
elif 'BIOMASS' in rea_i.id or '_LRE' in rea_i.reaction: # :
annotation_temp = {'sbo': ['SBO:0000629']} # Biomass reaction
elif '_c' in rea_i.reaction and '_e' in rea_i.reaction:
annotation_temp = {'sbo': ['SBO:0000655']} # Transport reaction
else:
annotation_temp = {'sbo': ['SBO:0000176']}
annotation_i = My_def.model_refine.merge_annotation(annotation_i, annotation_temp)
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).annotation = annotation_i
for rea_i in Lreu_draft_3_refined.reactions:
for i, j in rea_i.annotation.items():
if type(j) == str:
rea_i.annotation[i] = [j]
# get MetaCyc annotation, which is important for subsystems.
def fill_specific_database(model_1, to_database):
model = model_1.copy()
id_rea_list = [rea_i.id for rea_i in model.reactions]
metanetx_rea_list = []
bigg_rea_list = []
kegg_rea_list = []
biocyc_rea_list = []
for rea_i in model.reactions:
if type(rea_i.annotation['bigg.reaction']) == str:
rea_i.annotation['bigg.reaction'] = [rea_i.annotation['bigg.reaction']]
else:
bigg_rea_list = bigg_rea_list + rea_i.annotation['bigg.reaction']
if 'metanetx.reaction' in rea_i.annotation.keys():
metanetx_rea_list = metanetx_rea_list + rea_i.annotation['metanetx.reaction']
if 'kegg.reaction' in rea_i.annotation.keys():
kegg_rea_list = kegg_rea_list + rea_i.annotation['kegg.reaction']
if 'biocyc' in rea_i.annotation.keys():
biocyc_rea_list = biocyc_rea_list + rea_i.annotation['biocyc']
biocyc_rea_list = [i.replace('META:', '') for i in biocyc_rea_list]
targetlist_from_id, MNX_IDlist = My_def.mapIDsViaMNXref('rxns', id_rea_list, 'bigg', to_database)
targetlist_from_meatnetx, MNX_IDlist = My_def.mapIDsViaMNXref('rxns', metanetx_rea_list, 'metanetx', to_database)
targetlist_from_kegg, MNX_IDlist = My_def.mapIDsViaMNXref('rxns', kegg_rea_list, 'kegg', to_database)
targetlist_from_biocyc, MNX_IDlist = My_def.mapIDsViaMNXref('rxns', biocyc_rea_list, 'metacyc', to_database)
targetlist_from_bigg, MNX_IDlist = My_def.mapIDsViaMNXref('rxns', bigg_rea_list, 'bigg', to_database)
if to_database == 'metacyc':
annotation_key = 'biocyc'
if to_database == 'kegg':
annotation_key = 'kegg.reaction'
for rea_i in model.reactions:
if annotation_key in rea_i.annotation.keys():
to_temp = rea_i.annotation[annotation_key]
if rea_i.annotation[annotation_key] != ['']:
continue
to_temp = [targetlist_from_id[id_rea_list.index(rea_i.id)]]
list_ann = ['metanetx.reaction', 'kegg.reaction', 'biocyc', 'bigg.reaction']
list_ann.remove(annotation_key)
for ann_i in list_ann:
if ann_i in rea_i.annotation.keys():
for i in rea_i.annotation[ann_i]:
if ann_i == 'metanetx.reaction':
to_temp_i = targetlist_from_meatnetx[metanetx_rea_list.index(i)]
elif ann_i == 'kegg.reaction':
to_temp_i = targetlist_from_kegg[kegg_rea_list.index(i)]
elif ann_i == 'biocyc':
i = i.replace('META:', '')
to_temp_i = targetlist_from_biocyc[biocyc_rea_list.index(i)]
elif ann_i == 'bigg.reaction':
to_temp_i = targetlist_from_bigg[bigg_rea_list.index(i)]
if type(to_temp_i) == str:
to_temp_i = [to_temp_i]
to_temp.extend(to_temp_i)
to_temp = set(to_temp) - {''}
if len(to_temp) != 0:
# print(to_temp)
rea_i.annotation[annotation_key] = list(to_temp)
return model
to_database = 'metacyc'
Lreu_draft_3_refined = fill_specific_database(Lreu_draft_3_refined, to_database)
to_database = 'kegg'
Lreu_draft_3_refined = fill_specific_database(Lreu_draft_3_refined, to_database)
# %% <general step: subsystem annotation>
metacyc_subsystem = {}
for i in open('../Initial_data/MetaCyc_subSystems.csv', 'r'):
i = i.replace('\n', '')
temp = i.split('\t')
if temp[1] != '':
metacyc_subsystem[temp[0]] = temp[1:]
kegg_subsystem = {}
for i in open('../Initial_data/KEGG_subSystems.csv', 'r'):
i = i.replace('\n', '')
temp = i.split('\t')
if temp[1] != '':
kegg_subsystem[temp[0]] = temp[1:]
sub_count = 0
for rea_i in Lreu_draft_3_refined.reactions:
subsystem = []
if 'biocyc' in rea_i.annotation.keys():
for i in rea_i.annotation['biocyc']:
i = i.replace('META:', '')
if i in metacyc_subsystem.keys():
subsystem.extend(metacyc_subsystem[i])
elif 'kegg.reaction' in rea_i.annotation.keys():
for i in rea_i.annotation['kegg.reaction']:
if i in kegg_subsystem.keys():
subsystem.extend(kegg_subsystem[i])
subsystem = set(subsystem) - {''}
if len(subsystem) > 0:
rea_i.annotation['subsystem'] = list(set(subsystem))
sub_count += 1
# %% <general step: notes>
for rea_i in Lreu_draft_3_refined.reactions:
if rea_i.notes == []:
rea_i.notes = {}
if 'from' in rea_i.notes.keys():
if 'Lreu_from_Lreuteri_530' in rea_i.notes['from']:
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes['from'].remove('Lreu_from_Lreuteri_530')
if 'transport' in rea_i.notes['from']:
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes['from'].remove('transport')
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes['from'].append('transport_reaction')
if 'EX' in rea_i.notes['from']:
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes['from'].remove('EX')
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes['from'].append('exchange_reaction')
else:
if rea_i.id in iML1515_reas:
notes_temp = {'from': ['iML1515', 'gap']}
elif rea_i.id in Lreuteri_530_reas:
notes_temp = {'from': ['Lreuteri_530', 'gap']}
elif rea_i.id in iNF517_reas:
notes_temp = {'from': ['iNF517', 'gap']}
else:
notes_temp = {'from': ['gap']}
Lreu_draft_3_refined.reactions.get_by_id(rea_i.id).notes = notes_temp
# %% <general step: genes annotation and add _missing tag to missing genes >
print('blasting')  # find missing genes
'''
os.system('mkdir blast_tmps/')
Lreu_draft_3_refined_seq = '../Step_02_DraftModels/Lreuteri_biogaia_v03_2.faa'
iNF517_seq = '../Step_02_DraftModels/Template/template_seqs/iNF517.faa'
iML1515_seq = '../Step_02_DraftModels/Template/template_seqs/iML1515.faa'
Lreuteri_530_seq = '../Step_02_DraftModels/Template/template_seqs/Lreuteri_530.faa'
iBT721_seq = '../Step_02_DraftModels/Template/template_seqs/iBT721.faa'
qseq_file = Lreu_draft_3_refined_seq
mk_db = 'diamond makedb --in '+ qseq_file +' -d blast_tmps/qseq_db \n'
os.system(mk_db)
print('diamond blasting...')
options = ' --top 10 --more-sensitive '
df = pd.DataFrame()
for sseq_file in [iNF517_seq,iML1515_seq,Lreuteri_530_seq,iBT721_seq]:
diamond_blastp_cmd ='diamond blastp -d blast_tmps/qseq_db -q ' + sseq_file + options +' -o ' +\
'blast_tmps/blast_result_s_in_q.csv --evalue 1 --outfmt 6 qseqid sseqid | |
# Integer matrix multiply 8816 A/B
# load 1/2/4 submatrix
partition_contiguous_idx = lane_in_quad // factor_in_partition # 0011 0011 0011 0011
# access_strided_idx = lane_idx // warp_bfa_access_shape[1]
access_strided_idx = lane_idx // new_obj.layout.factor
access_contiguous_idx = (
(lane_in_pair * factor_in_partition)
^ # 0202 0202 0202 0202 0202 0202 0202 0202
(lane_in_quad_quad // new_obj.layout.factor)
) # 0000 1111 2222 3333 0000 1111 2222 3333
# stride: 0000 1111 2222 3333 4444 5555 6666 7777
# acc_idx_c: 0202 1313 2020 3131 0202 1313 2020 3131
# if 01 45, noop: ^ 0
# if 23 67, switch: ^ 1
# if 1 3 5 7, += 1
acc_idx_c = (lane_idx & 1) * 2 # 0202
if (access_strided_idx // 2) & 1 == 1:
acc_idx_c ^= 0b10 # 0202 0202 2020 2020 0202 0202 2020 2020
if access_strided_idx & 1 == 1:
acc_idx_c += 1
assert acc_idx_c == access_contiguous_idx, f"{lane_idx}, {acc_idx_c}, {access_contiguous_idx}"
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and new_obj.operand_a):
# Integer matrix multiply 16832 A
# Q0 Q2
# Q1 Q3
partition_contiguous_idx = lane_in_quad // factor_in_partition
access_strided_idx = lane_in_quad_quad // new_obj.layout.factor
# stride: 0000 1111 2222 3333 0000 1111 2222 3333
# 0202.... + 0[16] [16] ^
access_contiguous_idx = (
((lane_in_pair * factor_in_partition + quad_quad)
^ access_strided_idx))
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and not new_obj.operand_a):
# Integer matrix multiply 16832 B
# Q0 Q1
# Q2 Q3
partition_contiguous_idx = lane_in_quad // factor_in_partition
access_strided_idx = lane_in_quad_pair // new_obj.layout.factor + quad_quad * 2
access_contiguous_idx = (((lane_in_pair * factor_in_partition +
((lane_idx & 8) >> 3))
^ access_strided_idx))
elif new_obj.layout.factor == 2:
# Super Matrix multiply kBlock = 32
if (new_obj.lds_shape[0] == new_obj.lds_shape.prod()):
# Matrix multiply 1688 A/B
# (Q stands for 1 8x128bit block).
# Q0
# Q1
# Q2
# Q3
# Four blocks are next to each other in the strided dimension.
partition_contiguous_idx = (lane_idx % new_obj.layout.factor)
# these lines are matching swizzle behavior in tensorop layout.
# for k > 0, offset is handled in set_wmma_index
access_contiguous_idx = (lane_in_quad_pair //
new_obj.layout.factor
) # 00 11 22 33 00 11 22 33
access_strided_idx = lane_idx // new_obj.layout.factor # 00 11 22 33 ....
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and new_obj.operand_a):
# Matrix multiply 16816|1688.TF32 A
# Q0 Q2 (check mma inst for more details.)
# Q1 Q3
partition_contiguous_idx = (lane_idx % new_obj.layout.factor)
access_contiguous_idx = ((
quad_quad ^ (lane_in_quad_pair // new_obj.layout.factor)))
access_strided_idx = (lane_in_quad_quad //
new_obj.layout.factor)
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and not new_obj.operand_a):
# 16816: f16
# Matrix multiply 16816|1688.TF32 B
# Q0 Q1
# Q2 Q3
partition_contiguous_idx = (lane_idx % new_obj.layout.factor)
access_contiguous_idx = (
((quad_pair & 1) ^
(lane_in_quad_pair // new_obj.layout.factor)))
access_strided_idx = ((lane_in_quad_pair +
(lane_idx >> 4 << 3)) //
new_obj.layout.factor)
elif (new_obj.lds_shape[1] == new_obj.lds_shape.prod()):
# Matrix multiply 16832.SP B
# Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_idx % new_obj.layout.factor)
access_contiguous_idx = ((
quad_pair ^ (lane_in_quad_pair // new_obj.layout.factor)))
access_strided_idx = lane_in_quad_pair // new_obj.layout.factor
elif new_obj.layout.factor == 1:
# Super Matrix multiply kBlock = 64
if (new_obj.lds_shape[0] == new_obj.lds_shape.prod()):
# Q0
# Q1
# Q2
# Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2)
access_contiguous_idx = lane_in_quad
access_strided_idx = lane_idx
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and new_obj.operand_a):
# Matrix multiply 16816|1688.TF32 A
# Q0 Q2
# Q1 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2)
access_contiguous_idx = (quad_quad ^ lane_in_quad)
access_strided_idx = lane_in_quad_quad
elif (new_obj.lds_shape[0] == (new_obj.lds_shape.prod() // 2)
and not new_obj.operand_a):
# Matrix multiply 16816|1688.TF32 B
# Q0 Q1
# Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2)
access_contiguous_idx = ((quad_pair & 1) ^ lane_in_quad)
access_strided_idx = lane_in_quad_pair + (lane_idx >> 4 << 3)
elif (new_obj.lds_shape[1] == new_obj.lds_shape.prod()):
# Matrix multiply 16832.SP B
# Q0 Q1 Q2 Q3
partition_contiguous_idx = (lane_in_quad_pair >> 2)
access_contiguous_idx = (quad_pair ^ lane_in_quad)
access_strided_idx = lane_in_quad_pair
else:
raise NotImplementedError
access_contiguous = (
partition_contiguous_idx * new_obj.layout.part_shape[1] +
access_contiguous_idx)
access_strided = access_strided_idx
expected_offset = access_contiguous + access_strided_idx * new_obj.stride_
ref_offset = layout.get_ldm_initial_offset_ref(lane_idx,
self.lds_shape,
not self.operand_a)
assert ref_offset == expected_offset * self.element_per_acc, f"{lane_idx}, {expected_offset}, {ref_offset}"
new_obj.byte_offset_ = (
(access_contiguous + access_strided * new_obj.stride_) *
new_obj.dtype.bitsize() * new_obj.element_per_acc // 8)
        # for the k part: if partk == 2 and the smem k extent is 4, then num_warp_gemm_iters is 2
# so 0 1
# 0 1
new_obj.add_tile_offset_python(
warp_idx_mn, new_obj.num_warp_gemm_iters * warp_idx_k)
return new_obj
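    @staticmethod
    def _xor_swizzle_demo_python(rows: int = 8, cols: int = 8):
        # Added illustration, not used by the generated kernels: the XOR
        # swizzles above permute the contiguous access index with (part of)
        # the strided index, so consecutive rows map accesses to different
        # shared-memory banks. Each row of the returned table is a permutation
        # of range(cols), and no value repeats within a column (a Latin square).
        return [[contig ^ strided for contig in range(cols)]
                for strided in range(rows)]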
@pccm.cuda.member_function(device=True, forceinline=True)
def add_tile_offset(self):
code = pccm.FunctionCode(f"""
int mn_offset = warp_idx_mn;
int k_offset = warp_idx_k;
// tv::printf2_block_once(threadIdx.x, k_offset, mn_offset);
int whole_tiles = mn_offset / {self.k_groups_per_tile};
int k_groups_delta = mn_offset % {self.k_groups_per_tile};
byte_offset_ ^= k_groups_delta * {self.dtype.bitsize()} *
{self.layout.element_per_acc} *
{self.lds_shape[1]} / 8;
// tv::printf2_block_once(threadIdx.x, "premuteK", byte_offset_);
pointer_ +=
k_offset * stride_ * {self.warp_tile_shape_km[0]} / {self.layout.factor} +
whole_tiles * stride_ / sections_;
""")
return code.arg("warp_idx_k, warp_idx_mn", "int")
def add_tile_offset_python(self, warp_idx_k: int, warp_idx_mn: int):
mn_offset = warp_idx_mn
k_offset = warp_idx_k
whole_tiles = mn_offset // self.k_groups_per_tile
k_groups_delta = mn_offset % self.k_groups_per_tile
self.byte_offset_ ^= int(k_groups_delta * self.dtype.bitsize() *
self.layout.element_per_acc *
self.lds_shape[1] // 8)
self.pointer_ += (k_offset * self.stride_ *
self.warp_tile_shape_km[0] // self.layout.factor +
whole_tiles * self.my_layout.sw_shape[1])
@pccm.cuda.member_function(device=True, forceinline=True)
def tile_increment(self):
code = pccm.FunctionCode(f"""
add_tile_offset(0, num_tile);
""")
return code.arg("num_tile", "int")
def tile_increment_python(self, num: int):
return self.add_tile_offset_python(0, num)
@pccm.cuda.member_function(name="operator++",
device=True,
forceinline=True)
def operator_pp(self):
code = pccm.FunctionCode(f"""
// Integer matrix multiply 16832 Interleaved-32
// NONE
// Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32
// Integer matrix multiply 8816 Interleaved-32
// ^1 ^1
// Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64
// Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64
// ^1 ^3 ^1 ^3
// Matrix multiply 1688 kblock=64
// ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7
// Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64
// ^2 ^2
// Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128
// ^2 ^6 ^2 ^6
if (({self.k_groups_per_tile} / {self.partk}) > 1) {{
int mask = (({self.k_groups_per_tile} / {self.partk}) == 8)
? 3
: ((({self.k_groups_per_tile} / {self.partk}) == 4) ? 1 : 0);
if (((wmma_k_index_ & mask) % 2) == 0)
byte_offset_ ^= 1 * {self.lds_shape[1]} *
{self.dtype.bitsize()} *
{self.layout.element_per_acc} / 8;
else if ((wmma_k_index_ & mask) == 1)
byte_offset_ ^= 3 * {self.lds_shape[1]} *
{self.dtype.bitsize()} *
{self.layout.element_per_acc} / 8;
else if ((wmma_k_index_ & mask) == 3)
byte_offset_ ^= 7 * {self.lds_shape[1]} *
{self.dtype.bitsize()} *
{self.layout.element_per_acc} / 8;
}}
wmma_k_index_++;
// tv::printf2_block_once(threadIdx.x, "premuteK", byte_offset_);
if (wmma_k_index_ == ({self.k_groups_per_tile} / {self.partk})) {{
wmma_k_index_ = 0;
// k group increment
add_tile_offset(0, {self.k_groups_per_tile});
}}
return *this;
""")
return code.ret(f"{self.class_name} &")
def increment_python(self):
# Integer matrix multiply 16832 Interleaved-32
# NONE
# Integer matrix multiply 16816 Interleaved-32 || Integer matrix multiply 16816 kblock=32
# Integer matrix multiply 8816 Interleaved-32
# ^1 ^1
# Matrix multiply 1684.TF32 kblock=16 || Integer matrix multiply 16816 kblock=64
# Matrix multiply 1688 kblock=32 || Integer matrix multiply 8816 kblock=64
# ^1 ^3 ^1 ^3
# Matrix multiply 1688 kblock=64
# ^1 ^3 ^1 ^7 ^1 ^3 ^1 ^7
# Matrix multiply 16816 kblock=32 | 1688.TF32 kblock=16 || Integer matrix multiply 16832 kblock=64
# ^2 ^2
# Matrix multiply 16816 kblock=64 | 1688.TF32 kblock=32 || Integer matrix multiply 16832 kblock=128
# ^2 ^6 ^2 ^6
k_inc_width = self.lds_shape[1] * self.dtype.bitsize(
) * self.layout.element_per_acc // 8
num_k_inc = (self.k_groups_per_tile // self.partk)
if (num_k_inc > 1):
# mask: largest number of bit for increment
mask = self.k_group_inc_mask
# if self.k_groups_per_tile // self.partk == 8:
# mask = 0b11
# elif self.k_groups_per_tile // self.partk == 4:
# mask = 0b1
# else:
# mask = 0
# bit 0 advance
self.byte_offset_ ^= layout_tensorop.swizzle_increment(
self.wmma_k_index_ & mask, k_inc_width)
# if (((self.wmma_k_index_ & mask) % 2) == 0):
# self.byte_offset_ ^= (1 * self.lds_shape[1] *
# self.dtype.bitsize() *
# self.layout.element_per_acc // 8)
# # bit 1 advance
# elif ((self.wmma_k_index_ & mask) == 1):
# self.byte_offset_ ^= (0b11 * self.lds_shape[1] *
# self.dtype.bitsize() *
# self.layout.element_per_acc // 8)
# # bit 2 advance
| |
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
class SearchNode:
"""
Creates node: <state, action, parent_node>
"""
def __init__(self, state, action=None, parent=None):
self.state = state
self.action = action
self.parent = parent
def extract_solution(self):
""" Gets complete path from goal state to parent node """
action_path = []
search_node = self
while search_node:
if search_node.action:
action_path.append(search_node.action)
search_node = search_node.parent
return list(reversed(action_path))
start_node = SearchNode(problem.getStartState())
if problem.isGoalState(start_node.state):
return start_node.extract_solution()
frontier = util.Stack()
explored = set()
frontier.push(start_node)
# run until stack is empty
while not frontier.isEmpty():
node = frontier.pop() # choose the deepest node in frontier
explored.add(node.state)
if problem.isGoalState(node.state):
return node.extract_solution()
# expand node
successors = problem.getSuccessors(node.state)
for succ in successors:
# make-child-node
child_node = SearchNode(succ[0], succ[1], node)
if child_node.state not in explored:
frontier.push(child_node)
# no solution
util.raiseNotDefined()
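# Added, standalone illustration (not part of the assignment code): the same
# graph-search skeleton on a hypothetical four-node graph, using a plain list
# as the LIFO frontier in place of util.Stack.
def _demo_depth_first_search():
    graph = {'A': [('B', 'right', 1), ('C', 'down', 1)],
             'B': [('D', 'down', 1)],
             'C': [('D', 'right', 1)],
             'D': []}
    goal = 'D'
    frontier = [('A', [])]  # (state, actions taken so far)
    explored = set()
    while frontier:
        state, actions = frontier.pop()  # deepest node first
        if state == goal:
            return actions
        if state in explored:
            continue
        explored.add(state)
        for successor, action, _cost in graph[state]:
            if successor not in explored:
                frontier.append((successor, actions + [action]))
    return None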
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
class SearchNode:
"""
Creates node: <state, action, parent_node>
"""
def __init__(self, state, action=None, parent=None):
self.state = state
self.action = action
self.parent = parent
def extract_solution(self):
""" Gets complete path from goal state to parent node """
action_path = []
search_node = self
while search_node:
if search_node.action:
action_path.append(search_node.action)
search_node = search_node.parent
return list(reversed(action_path))
def is_in_frontier(self, data_structure):
for n in data_structure.list:
if n.state == self.state:
return True
return False
start_node = SearchNode(problem.getStartState())
if problem.isGoalState(start_node.state):
return start_node.extract_solution()
frontier = util.Queue() # FIFO
frontier.push(start_node)
explored = set()
while not frontier.isEmpty():
node = frontier.pop() # choose the shallowest node in frontier
explored.add(node.state)
if problem.isGoalState(node.state):
return node.extract_solution()
successors = problem.getSuccessors(node.state)
for succ in successors:
child_node = SearchNode(succ[0], succ[1], node)
if child_node.state not in explored and\
not child_node.is_in_frontier(frontier):
frontier.push(child_node)
# no solution
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
class SearchNode:
"""
Creates node: <state, action, cost, parent_node>
"""
def __init__(self, state, action=None, path_cost = 0, parent=None):
self.state = state
self.action = action
self.parent = parent
if parent:
self.path_cost = path_cost + parent.path_cost
else:
self.path_cost = path_cost
def extract_solution(self):
""" Gets complete path from goal state to parent node """
action_path = []
search_node = self
while search_node:
if search_node.action:
action_path.append(search_node.action)
search_node = search_node.parent
return list(reversed(action_path))
        def is_in_priority_queue(self, priority_queue):
            """ Check if the node is already in the priority queue """
            for _, _, item in priority_queue.heap:
                if item.state == self.state:
                    return True
            return False
start_node = SearchNode(problem.getStartState())
if problem.isGoalState(start_node.state):
return start_node.extract_solution()
    frontier = util.PriorityQueue()  # min-priority queue keyed by path cost
frontier.push(start_node, start_node.path_cost)
explored = set()
while not frontier.isEmpty():
node = frontier.pop() # chooses the lowest-cost node in frontier
# goal-test
if problem.isGoalState(node.state):
return node.extract_solution()
if node.state not in explored:
explored.add(node.state)
successors = problem.getSuccessors(node.state)
for succ in successors:
child_node = SearchNode(succ[0], succ[1], succ[2], node)
frontier.update(child_node, child_node.path_cost)
# no solution
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
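# Hedged example (not part of the original file): a typical admissible heuristic
# for a position-search problem whose states are (x, y) grid coordinates;
# `problem.goal` is an assumed attribute, not part of the SearchProblem
# interface defined above.
def manhattanDistanceHeuristic(state, problem=None):
    x1, y1 = state
    x2, y2 = problem.goal
    return abs(x1 - x2) + abs(y1 - y2)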
# In both the practical task and Assignment 1
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
# class to represent SearchNode
class SearchNode:
"""
Creates node: <state, action, f(s), g(s), h(s), parent_node>
"""
def __init__(self, state, action=None, g=None, h=None,
parent=None):
self.state = state
self.action = action
self.parent = parent
# heuristic value
self.h = h
# combined cost
if parent:
self.g = g + parent.g
else:
self.g = 0
# evaluation function value
self.f = self.g + self.h
def extract_solution(self):
""" Gets complete path from goal state to parent node """
action_path = []
search_node = self
while search_node:
if search_node.action:
action_path.append(search_node.action)
search_node = search_node.parent
return list(reversed(action_path))
# make search node function
def make_search_node(state, action=None, cost=None, parent=None):
if hasattr(problem, 'heuristicInfo'):
if parent:
# same parent - avoid re-calculation
# for reducing computations in logic
                if parent == problem.heuristicInfo.get("parent"):
problem.heuristicInfo["sameParent"] = True
else:
problem.heuristicInfo["sameParent"] = False
# adding parent info for reducing computations
problem.heuristicInfo["parent"] = parent
# get heuristic value
h_value = heuristic(state, problem)
return SearchNode(state, action, cost, h_value, parent)
# create open list
open = util.PriorityQueue()
node = make_search_node(problem.getStartState())
open.push(node, node.f)
closed = set()
best_g = {} # maps states to numbers
# run until open list is empty
while not open.isEmpty():
node = open.pop() # pop-min
if node.state not in closed or node.g < best_g[node.state]:
closed.add(node.state)
best_g[node.state] = node.g
# goal-test
if problem.isGoalState(node.state):
return node.extract_solution()
# expand node
successors = problem.getSuccessors(node.state)
for succ in successors:
child_node = make_search_node(succ[0],succ[1],succ[2], node)
if child_node.h < float("inf"):
open.push(child_node, child_node.f)
# no solution
util.raiseNotDefined()
# Extensions Assignment 1
def iterativeDeepeningSearch(problem):
"""Search the deepest node in an iterative manner."""
class SearchNode:
"""
Creates node: <state, action, depth, parent_node>
"""
def __init__(self, state, action=None, depth = 0, parent=None):
self.state = state
self.action = action
self.parent = parent
if parent:
self.depth = depth + parent.depth
else:
self.depth = depth
def extract_solution(self):
""" Gets complete path from initial state to goal state """
action_path = []
search_node = self
while search_node:
if search_node.action:
action_path.append(search_node.action)
search_node = search_node.parent
return list(reversed(action_path))
# limit for IDS
limit = 0
# controlling infinite loop
LOOP_COUNT = 0
LOOP_LIMIT = 999999999
# running iteratively
# increasing limit until goal-state is found
while True:
# no solution hard limit check
if LOOP_COUNT == LOOP_LIMIT:
break
node = SearchNode(problem.getStartState())
# goal-test
if problem.isGoalState(node.state):
return node.extract_solution()
frontier = util.Stack() # LIFO stack
explored = set() # empty set
frontier.push(node)
# run until frontier is empty
while not frontier.isEmpty():
node = frontier.pop() # choose the deepest node in frontier
explored.add(node.state)
# never expand branch farther than the limit
if node.depth < limit:
# expand node
successors = problem.getSuccessors(node.state)
for succ in successors:
# make-child-node
# path step cost is considered as depth
child_node = SearchNode(succ[0], succ[1], succ[2], node)
# child.STATE is not in explored
if child_node.state not | |
<filename>tuf/scripts/repo.py
#!/usr/bin/env python
# Copyright 2018, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
repo.py
<Author>
<NAME> <<EMAIL>>
<Started>
January 2018.
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
Provide a command-line interface to create and modify TUF repositories. The
CLI removes the need to write Python code when creating or modifying
repositories, which is the case with repository_tool.py and
developer_tool.py.
Note:
'pip install securesystemslib[crypto,pynacl]' is required by the CLI,
which installs the 3rd-party dependencies: cryptography and pynacl.
<Usage>
Note: arguments within brackets are optional.
$ repo.py --init
[--consistent, --bare, --path, --root_pw, --targets_pw,
--snapshot_pw, --timestamp_pw]
$ repo.py --add <target> <dir> ... [--path, --recursive]
$ repo.py --remove <glob pattern>
$ repo.py --distrust --pubkeys </path/to/pubkey> [--role]
$ repo.py --trust --pubkeys </path/to/pubkey> [--role]
$ repo.py --sign </path/to/key> [--role <targets>]
$ repo.py --key <keytype>
[--filename <filename>
--path </path/to/repo>, --pw [my_password]]
$ repo.py --delegate <glob pattern> --delegatee <rolename>
--pubkeys </path/to/pubkey>
[--role <rolename> --terminating --threshold <X>
--sign </path/to/role_privkey>]
$ repo.py --revoke --delegatee <rolename>
[--role <rolename> --sign </path/to/role_privkey>]
$ repo.py --verbose <0-5>
$ repo.py --clean [--path]
<Options>
--init:
Create new TUF repository in current working or specified directory.
--consistent:
Enable consistent snapshots for newly created TUF repository.
--bare:
Specify creation of bare TUF repository with no key created or set.
--path:
Choose specified path location of a TUF repository or key(s).
--role:
Specify top-level role(s) affected by the main command-line option.
--pubkeys:
Indicate location of key(s) affected by the main command-line option.
--root_pw:
Set password for encrypting top-level key file of root role.
--targets_pw:
Set password for encrypting top-level key file of targets role.
--snapshot_pw:
Set password for encrypting top-level key file of snapshot role.
--timestamp_pw:
Set password for encrypting top-level key file of timestamp role.
--add:
Add file specified by <target> to the Targets metadata.
--recursive:
Include files in subdirectories of specified directory <dir>.
--remove:
Remove target files from Targets metadata matching <glob pattern>.
--distrust:
Discontinue trust of keys located in </path/to/pubkey> directory of a role.
--trust:
Indicate trusted keys located in </path/to/pubkey> directory of a role.
--sign:
Sign metadata of target role(s) with keys in specified directory.
--key:
Generate cryptographic key of specified type <keytype> (default: Ed25519).
--filename:
Specify filename associated with generated top-level key.
--pw:
Set password for the generated key of specified type <keytype>.
--delegate:
Delegate trust of target files from Targets role (or <rolename> specified
in --role) to --delegatee role with specified <rolename>.
--delegatee:
Specify role that is targeted by delegator in --role to sign for
target files matching delegated <glob pattern> or in revocation of trust.
--terminating:
Mark delegation to --delegatee role from delegator as a terminating one.
--threshold:
Specify signature threshold of --delegatee role as the value <X>.
--revoke:
Revoke trust of target files from delegated role (--delegatee)
--verbose:
Set the verbosity level of logging messages. Accepts values 1-5.
--clean:
Delete repo in current working or specified directory.
"""
# Help with Python 2+3 compatibility, where the print statement is a function,
# an implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import logging
import argparse
import shutil
import time
import fnmatch
import securesystemslib # pylint: disable=unused-import
from securesystemslib import exceptions as sslib_exceptions
from securesystemslib import formats as sslib_formats
from securesystemslib import interface as sslib_interface
from securesystemslib import keys as sslib_keys
from securesystemslib import settings as sslib_settings
from securesystemslib import util as sslib_util
from tuf import exceptions
from tuf import formats
from tuf import keydb
from tuf import log
from tuf import repository_tool as repo_tool
from tuf import roledb
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger(__name__)
repo_tool.disable_console_log_messages()
PROG_NAME = 'repo.py'
REPO_DIR = 'tufrepo'
CLIENT_DIR = 'tufclient'
KEYSTORE_DIR = 'tufkeystore'
ROOT_KEY_NAME = 'root_key'
TARGETS_KEY_NAME = 'targets_key'
SNAPSHOT_KEY_NAME = 'snapshot_key'
TIMESTAMP_KEY_NAME = 'timestamp_key'
STAGED_METADATA_DIR = 'metadata.staged'
METADATA_DIR = 'metadata'
# The keytype strings, as expected on the command line.
ED25519_KEYTYPE = 'ed25519'
ECDSA_KEYTYPE = 'ecdsa'
RSA_KEYTYPE = 'rsa'
SUPPORTED_CLI_KEYTYPES = (ECDSA_KEYTYPE, ED25519_KEYTYPE, RSA_KEYTYPE)
# The supported keytype strings (as they appear in metadata) are listed here
# because they won't necessarily match the key types supported by
# securesystemslib.
SUPPORTED_KEY_TYPES = ('ed25519', 'ecdsa-sha2-nistp256', 'rsa')
# pylint: disable=protected-access
# ... to allow use of sslib _generate_and_write_*_keypair convenience methods
def process_command_line_arguments(parsed_arguments):
"""
<Purpose>
Perform the relevant operations on the repo according to the chosen
command-line options. Which functions are executed depends on
'parsed_arguments'. For instance, the --init and --clean options will
cause the init_repo() and clean_repo() functions to be called.
Multiple operations can be executed in one invocation of the CLI.
<Arguments>
parsed_arguments:
The parsed arguments returned by argparse.parse_args().
<Exceptions>
securesystemslib.exceptions.Error, if any of the arguments are
      improperly formatted or if any of the arguments could not be processed.
<Side Effects>
None.
<Returns>
None.
"""
# Do we have a valid argparse Namespace?
if not isinstance(parsed_arguments, argparse.Namespace):
raise exceptions.Error('Invalid namespace: ' + repr(parsed_arguments))
else:
logger.debug('We have a valid argparse Namespace.')
# TODO: Make sure the order that the arguments are processed allows for the
# most convenient use of multiple options in one invocation of the CLI. For
# instance, it might be best for --clean to be processed first before --init
# so that a user can do the following: repo.py --clean --init (that is, first
# clear the repo in the current working directory, and then initialize a new
  # one.)
if parsed_arguments.clean:
clean_repo(parsed_arguments)
if parsed_arguments.init:
init_repo(parsed_arguments)
if parsed_arguments.remove:
remove_targets(parsed_arguments)
if parsed_arguments.add:
add_targets(parsed_arguments)
if parsed_arguments.distrust:
remove_verification_key(parsed_arguments)
if parsed_arguments.trust:
add_verification_key(parsed_arguments)
if parsed_arguments.key:
gen_key(parsed_arguments)
if parsed_arguments.revoke:
revoke(parsed_arguments)
if parsed_arguments.delegate:
delegate(parsed_arguments)
# --sign should be processed last, after the other options, so that metadata
# is signed last after potentially being modified by the other options.
if parsed_arguments.sign:
sign_role(parsed_arguments)
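# A minimal driver sketch (hypothetical; the helper name below is an assumption,
# the real script builds its argparse parser elsewhere):
#   if __name__ == '__main__':
#     arguments = parse_arguments()              # assumed argparse-building helper
#     process_command_line_arguments(arguments)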
def delegate(parsed_arguments):
if not parsed_arguments.delegatee:
raise exceptions.Error(
'--delegatee must be set to perform the delegation.')
if parsed_arguments.delegatee in ('root', 'snapshot', 'timestamp', 'targets'):
raise exceptions.Error(
'Cannot delegate to the top-level role: ' + repr(parsed_arguments.delegatee))
if not parsed_arguments.pubkeys:
raise exceptions.Error(
'--pubkeys must be set to perform the delegation.')
public_keys = []
for public_key in parsed_arguments.pubkeys:
imported_pubkey = import_publickey_from_file(public_key)
public_keys.append(imported_pubkey)
repository = repo_tool.load_repository(
os.path.join(parsed_arguments.path, REPO_DIR))
if parsed_arguments.role == 'targets':
repository.targets.delegate(parsed_arguments.delegatee, public_keys,
parsed_arguments.delegate, parsed_arguments.threshold,
parsed_arguments.terminating, list_of_targets=None,
path_hash_prefixes=None)
targets_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
parsed_arguments.targets_pw)
repository.targets.load_signing_key(targets_private)
# A delegated (non-top-level-Targets) role.
else:
repository.targets(parsed_arguments.role).delegate(
parsed_arguments.delegatee, public_keys,
parsed_arguments.delegate, parsed_arguments.threshold,
parsed_arguments.terminating, list_of_targets=None,
path_hash_prefixes=None)
# Update the required top-level roles, Snapshot and Timestamp, to make a new
# release. Automatically making a new release can be disabled via
# --no_release.
if not parsed_arguments.no_release:
snapshot_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
parsed_arguments.snapshot_pw)
timestamp_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR,
TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
repository.snapshot.load_signing_key(snapshot_private)
repository.timestamp.load_signing_key(timestamp_private)
consistent_snapshot = roledb.get_roleinfo('root',
repository._repository_name)['consistent_snapshot']
repository.writeall(consistent_snapshot=consistent_snapshot)
# Move staged metadata directory to "live" metadata directory.
write_to_live_repo(parsed_arguments)
def revoke(parsed_arguments):
repository = repo_tool.load_repository(
os.path.join(parsed_arguments.path, REPO_DIR))
if parsed_arguments.role == 'targets':
repository.targets.revoke(parsed_arguments.delegatee)
targets_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
parsed_arguments.targets_pw)
repository.targets.load_signing_key(targets_private)
# A non-top-level role.
else:
repository.targets(parsed_arguments.role).revoke(parsed_arguments.delegatee)
role_privatekey = import_privatekey_from_file(parsed_arguments.sign)
repository.targets(parsed_arguments.role).load_signing_key(role_privatekey)
# Update the required top-level roles, Snapshot and Timestamp, to make a new
# release. Automatically making a new release can be disabled via
# --no_release.
if not parsed_arguments.no_release:
snapshot_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR, SNAPSHOT_KEY_NAME),
parsed_arguments.snapshot_pw)
timestamp_private = import_privatekey_from_file(
os.path.join(parsed_arguments.path, KEYSTORE_DIR,
TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)
repository.snapshot.load_signing_key(snapshot_private)
repository.timestamp.load_signing_key(timestamp_private)
consistent_snapshot = roledb.get_roleinfo('root',
repository._repository_name)['consistent_snapshot']
repository.writeall(consistent_snapshot=consistent_snapshot)
# Move staged metadata directory to "live" metadata directory.
write_to_live_repo(parsed_arguments)
def gen_key(parsed_arguments):
if parsed_arguments.filename:
parsed_arguments.filename = os.path.join(parsed_arguments.path,
KEYSTORE_DIR, parsed_arguments.filename)
keypath = None
keygen_kwargs = {
"password": parsed_arguments.pw,
"filepath": parsed_arguments.filename,
"prompt": (not parsed_arguments.pw) # prompt if no default or passed pw
}
if parsed_arguments.key not in SUPPORTED_CLI_KEYTYPES:
    raise exceptions.Error(
'Invalid key type: ' + repr(parsed_arguments.key) + '. Supported'
' key types: ' + repr(SUPPORTED_CLI_KEYTYPES))
elif parsed_arguments.key == ECDSA_KEYTYPE:
keypath = sslib_interface._generate_and_write_ecdsa_keypair(
**keygen_kwargs)
elif parsed_arguments.key == ED25519_KEYTYPE:
keypath = sslib_interface._generate_and_write_ed25519_keypair(
**keygen_kwargs)
# RSA key..
else:
keypath = sslib_interface._generate_and_write_rsa_keypair(
**keygen_kwargs)
# If a filename is not given, the generated keypair is saved to the current
# working directory. By default, the keypair is written to <KEYID>.pub
# and <KEYID> (private key).
if not parsed_arguments.filename:
privkey_repo_path = os.path.join(parsed_arguments.path,
        KEYSTORE_DIR, os.path.basename(keypath))
RESUMABLE_UPLOAD_THRESHOLD=5):
with _NamedTemporaryFile() as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(DATA)
with open(temp.name, 'rb') as file_obj:
blob.upload_from_file(file_obj, rewind=True)
rq = connection.http._requested
self.assertEqual(len(rq), 3)
# Requested[0]
headers = dict(
[(x.title(), str(y)) for x, y in rq[0].pop('headers').items()])
self.assertEqual(headers['X-Upload-Content-Length'], '6')
self.assertEqual(headers['X-Upload-Content-Type'],
'application/octet-stream')
uri = rq[0].pop('uri')
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'resumable', 'name': BLOB_NAME})
self.assertEqual(rq[0], {
'method': 'POST',
'body': '',
'connection_type': None,
'redirections': 5,
})
# Requested[1]
headers = dict(
[(x.title(), str(y)) for x, y in rq[1].pop('headers').items()])
self.assertEqual(headers['Content-Range'], 'bytes 0-4/6')
self.assertEqual(rq[1], {
'method': 'PUT',
'uri': UPLOAD_URL,
'body': DATA[:5],
'connection_type': None,
'redirections': 5,
})
# Requested[2]
headers = dict(
[(x.title(), str(y)) for x, y in rq[2].pop('headers').items()])
self.assertEqual(headers['Content-Range'], 'bytes 5-5/6')
self.assertEqual(rq[2], {
'method': 'PUT',
'uri': UPLOAD_URL,
'body': DATA[5:],
'connection_type': None,
'redirections': 5,
})
def test_upload_from_file_w_slash_in_name(self):
from six.moves.http_client import OK
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
from gcloud._testing import _NamedTemporaryFile
from gcloud.streaming import http_wrapper
BLOB_NAME = 'parent/child'
UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild'
DATA = b'ABCDEF'
loc_response = {'status': OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': OK}
connection = _Connection(
(loc_response, '{}'),
(chunk1_response, ''),
(chunk2_response, ''),
)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._makeOne(BLOB_NAME, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 5
with _NamedTemporaryFile() as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(DATA)
with open(temp.name, 'rb') as file_obj:
blob.upload_from_file(file_obj, rewind=True)
self.assertEqual(file_obj.tell(), len(DATA))
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['redirections'], 5)
self.assertEqual(rq[0]['body'], DATA)
self.assertEqual(rq[0]['connection_type'], None)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': 'parent/child'})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'application/octet-stream')
def _upload_from_filename_test_helper(self, properties=None,
content_type_arg=None,
expected_content_type=None):
from six.moves.http_client import OK
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
from gcloud._testing import _NamedTemporaryFile
from gcloud.streaming import http_wrapper
BLOB_NAME = 'blob-name'
UPLOAD_URL = 'http://example.com/upload/name/key'
DATA = b'ABCDEF'
loc_response = {'status': OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': OK}
connection = _Connection(
(loc_response, '{}'),
(chunk1_response, ''),
(chunk2_response, ''),
)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._makeOne(BLOB_NAME, bucket=bucket,
properties=properties)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 5
with _NamedTemporaryFile(suffix='.jpeg') as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(DATA)
blob.upload_from_filename(temp.name,
content_type=content_type_arg)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': BLOB_NAME})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], expected_content_type)
def test_upload_from_filename(self):
self._upload_from_filename_test_helper(
expected_content_type='image/jpeg')
def test_upload_from_filename_with_content_type(self):
EXPECTED_CONTENT_TYPE = 'foo/bar'
self._upload_from_filename_test_helper(
properties={'contentType': EXPECTED_CONTENT_TYPE},
expected_content_type=EXPECTED_CONTENT_TYPE)
def test_upload_from_filename_with_content_type_passed(self):
EXPECTED_CONTENT_TYPE = 'foo/bar'
self._upload_from_filename_test_helper(
content_type_arg=EXPECTED_CONTENT_TYPE,
expected_content_type=EXPECTED_CONTENT_TYPE)
def test_upload_from_filename_both_content_type_sources(self):
EXPECTED_CONTENT_TYPE = 'foo/bar'
ALT_CONTENT_TYPE = 'foo/baz'
self._upload_from_filename_test_helper(
properties={'contentType': ALT_CONTENT_TYPE},
content_type_arg=EXPECTED_CONTENT_TYPE,
expected_content_type=EXPECTED_CONTENT_TYPE)
def test_upload_from_string_w_bytes(self):
from six.moves.http_client import OK
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
from gcloud.streaming import http_wrapper
BLOB_NAME = 'blob-name'
UPLOAD_URL = 'http://example.com/upload/name/key'
DATA = b'ABCDEF'
loc_response = {'status': OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': OK}
connection = _Connection(
(loc_response, '{}'),
(chunk1_response, ''),
(chunk2_response, ''),
)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._makeOne(BLOB_NAME, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 5
blob.upload_from_string(DATA)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': BLOB_NAME})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], '6')
self.assertEqual(headers['Content-Type'], 'text/plain')
self.assertEqual(rq[0]['body'], DATA)
def test_upload_from_string_w_text(self):
from six.moves.http_client import OK
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
from gcloud.streaming import http_wrapper
BLOB_NAME = 'blob-name'
UPLOAD_URL = 'http://example.com/upload/name/key'
DATA = u'ABCDEF\u1234'
ENCODED = DATA.encode('utf-8')
loc_response = {'status': OK, 'location': UPLOAD_URL}
chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE,
'range': 'bytes 0-4'}
chunk2_response = {'status': OK}
connection = _Connection(
(loc_response, '{}'),
(chunk1_response, ''),
(chunk2_response, ''),
)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._makeOne(BLOB_NAME, bucket=bucket)
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 5
blob.upload_from_string(DATA)
rq = connection.http._requested
self.assertEqual(len(rq), 1)
self.assertEqual(rq[0]['method'], 'POST')
uri = rq[0]['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual(scheme, 'http')
self.assertEqual(netloc, 'example.com')
self.assertEqual(path, '/b/name/o')
self.assertEqual(dict(parse_qsl(qs)),
{'uploadType': 'media', 'name': BLOB_NAME})
headers = dict(
[(x.title(), str(y)) for x, y in rq[0]['headers'].items()])
self.assertEqual(headers['Content-Length'], str(len(ENCODED)))
self.assertEqual(headers['Content-Type'], 'text/plain')
self.assertEqual(rq[0]['body'], ENCODED)
def test_make_public(self):
from gcloud.storage.acl import _ACLEntity
BLOB_NAME = 'blob-name'
permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
after = {'acl': permissive}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._makeOne(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_public()
self.assertEqual(list(blob.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME)
self.assertEqual(kw[0]['data'], {'acl': permissive})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_cache_control_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CACHE_CONTROL = 'no-cache'
properties = {'cacheControl': CACHE_CONTROL}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
BLOB_NAME = 'blob-name'
CACHE_CONTROL = 'no-cache'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.cache_control, None)
blob.cache_control = CACHE_CONTROL
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_component_count(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._makeOne('blob-name', bucket=BUCKET,
properties={'componentCount': COMPONENT_COUNT})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_component_count_unset(self):
BUCKET = object()
blob = self._makeOne('blob-name', bucket=BUCKET)
self.assertEqual(blob.component_count, None)
def test_component_count_string_val(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._makeOne(
'blob-name', bucket=BUCKET,
properties={'componentCount': str(COMPONENT_COUNT)})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
properties = {'contentDisposition': CONTENT_DISPOSITION}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.content_disposition, None)
blob.content_disposition = CONTENT_DISPOSITION
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_encoding_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_ENCODING = 'gzip'
properties = {'contentEncoding': CONTENT_ENCODING}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_ENCODING = 'gzip'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.content_encoding, None)
blob.content_encoding = CONTENT_ENCODING
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_language_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_LANGUAGE = 'pt-BR'
properties = {'contentLanguage': CONTENT_LANGUAGE}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_LANGUAGE = 'pt-BR'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.content_language, None)
blob.content_language = CONTENT_LANGUAGE
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_type_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_TYPE = 'image/jpeg'
properties = {'contentType': CONTENT_TYPE}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_TYPE = 'image/jpeg'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.content_type, None)
blob.content_type = CONTENT_TYPE
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_crc32c_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CRC32C = 'DEADBEEF'
properties = {'crc32c': CRC32C}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.crc32c, CRC32C)
def test_crc32c_setter(self):
BLOB_NAME = 'blob-name'
CRC32C = 'DEADBEEF'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.crc32c, None)
blob.crc32c = CRC32C
self.assertEqual(blob.crc32c, CRC32C)
def test_etag(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ETAG = 'ETAG'
properties = {'etag': ETAG}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.etag, ETAG)
def test_generation(self):
BUCKET = object()
GENERATION = 42
blob = self._makeOne('blob-name', bucket=BUCKET,
properties={'generation': GENERATION})
self.assertEqual(blob.generation, GENERATION)
def test_generation_unset(self):
BUCKET = object()
blob = self._makeOne('blob-name', bucket=BUCKET)
self.assertEqual(blob.generation, None)
def test_generation_string_val(self):
BUCKET = object()
GENERATION = 42
blob = self._makeOne('blob-name', bucket=BUCKET,
properties={'generation': str(GENERATION)})
self.assertEqual(blob.generation, GENERATION)
def test_id(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ID = 'ID'
properties = {'id': ID}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.id, ID)
def test_md5_hash_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MD5_HASH = 'DEADBEEF'
properties = {'md5Hash': MD5_HASH}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
BLOB_NAME = 'blob-name'
MD5_HASH = 'DEADBEEF'
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.md5_hash, None)
blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_media_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.media_link, MEDIA_LINK)
def test_metadata_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
METADATA = {'foo': 'Foo'}
properties = {'metadata': METADATA}
blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter(self):
BLOB_NAME = 'blob-name'
METADATA = {'foo': 'Foo'}
bucket = _Bucket()
blob = self._makeOne(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.metadata, None)
blob.metadata = METADATA
self.assertEqual(blob.metadata, METADATA)
def test_metageneration(self):
BUCKET = object()
METAGENERATION = 42
blob = self._makeOne('blob-name', bucket=BUCKET,
properties={'metageneration': METAGENERATION})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_metageneration_unset(self):
BUCKET = object()
blob = self._makeOne('blob-name', bucket=BUCKET)
self.assertEqual(blob.metageneration, None)
def test_metageneration_string_val(self):
BUCKET = object()
METAGENERATION = 42
blob = self._makeOne(
'blob-name', bucket=BUCKET,
properties={'metageneration': str(METAGENERATION)})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_owner(self):
BLOB_NAME = 'blob-name'
"""
Some basic inference functions adapted from my inferno module which should be available
here soon:
https://github.com/nealegibson/inferno
Really they are just rewritten versions of https://github.com/nealegibson/Infer
But there are many other options for optimisers/MCMCs/etc, and they should (in principle)
all do much the same job!
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin
from tqdm.auto import tqdm
import time
def fopt(f,x0,var=None,args=[],min=False,**kwargs):
"""
Optimisation function using scipy's fmin.
This uses a simple wrapper to allow maximising as well as minimising functions, as well
as allowing for a fixed set of parameters. inferno.opt or inferno.optimise has more
options.
f - function to be optimised, which can be called as f(x0,*args)
x0 - array of starting points
var - array with the same length as x0 indicating variable parameters. Parameters are
variable if >0, so boolean True/False, integers, or even error arrays will work
  args - additional arguments passed through to the function f, i.e. f(x0,*args)
min - if True minimises rather than maximises the function f wrt x0
kwargs - additional keyword args are passed to fmin
"""
if var is None: var = np.ones(x0.size)
var_ind = var>0
x = np.copy(x0)
#define wrapper function to re-construct the full parameter array from subset
def wrapper(p,*args):
x[var_ind] = p # get full parameter array
if min: return f(x,*args) #call the function and return
else: return -f(x,*args) #or the negative of the func to maximise the function instead
#call scipy's fmin on the wrapper function
x[var_ind] = fmin(wrapper,x0[var_ind],args=args,**kwargs)
return x
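# Illustrative usage sketch (not part of the original module): maximise a toy
# straight-line log likelihood while holding the noise parameter fixed. All of
# the data and parameter names below are made up for demonstration.
def _example_fopt_usage():
  x_data = np.linspace(0, 10, 50)
  y_data = 2. * x_data + 1. + np.random.normal(0, 0.5, x_data.size)
  def logL(p):
    # p = [gradient, intercept, log_sigma]; simple iid Gaussian likelihood
    model = p[0] * x_data + p[1]
    sigma = np.exp(p[2])
    return -0.5 * np.sum((y_data - model)**2 / sigma**2 + np.log(2 * np.pi * sigma**2))
  p0 = np.array([1., 0., np.log(0.5)])
  var = np.array([1, 1, 0])  # final (noise) parameter is held fixed
  return fopt(logL, p0, var=var, disp=False)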
def apply_uniform_logPrior(logL,bounds):
"""
  Simple function decorator that takes in a log Likelihood function, and returns a log Posterior
that is bounded according to bounds.
logL - input function of form logL(p,*args,**kwargs)
bounds - list of (lower/upper) bounds for each parameter
e.g. for a parameter vector of length 5, could use:
bounds = [(0,1),(-np.inf,np.inf),None,(-10,10),(2,9)]
then can define logP from logL as follows:
  logP = apply_uniform_logPrior(logL,bounds)
logP is then a callable function just like logL
logP(p,*args,...)
"""
#convert bounds into lower and upper limits of len(p)
lower_lims = np.array([i[0] if i is not None else -np.inf for i in bounds])
upper_lims = np.array([i[1] if i is not None else np.inf for i in bounds])
#define the posterior distribution
def logP(p,*args,**kwargs):
#apply priors - return -np.inf if outside prior range
if np.any(p<lower_lims): return -np.inf
if np.any(p>upper_lims): return -np.inf
return logL(p,*args,**kwargs) #else return the logL
return logP #return the function
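# Quick usage sketch (illustrative, not from the original module): bound a toy
# two-parameter log likelihood so the first parameter must lie in (0,1) and the
# second is left unbounded.
def _example_apply_uniform_logPrior():
  def toy_logL(p):
    return -0.5 * np.sum(p**2)
  logP = apply_uniform_logPrior(toy_logL, [(0, 1), None])
  inside = logP(np.array([0.5, 3.]))   # finite - both parameters within bounds
  outside = logP(np.array([1.5, 3.]))  # -np.inf - first parameter out of bounds
  return inside, outside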
def uniform_logPrior(bounds):
"""
same as above, but returns the logPrior directly
"""
lower = np.array([t[0] if type(t) is tuple else -np.inf for t in bounds])
upper = np.array([t[1] if type(t) is tuple else np.inf for t in bounds])
def log_prior(x,*args,**kwargs):
"""
logPrior with simple bounds
"""
if np.any(x<lower): return -np.inf
elif np.any(x>upper): return -np.inf
else: return 0.
return log_prior
def miniMCMC(f,x,x_err,burnin,chain,N=32,args=[],a=2,dlogP=50):
"""
Simple MCMC function. This is missing a lot of functionality and checks of more
extensive codes, see inferno.mcmc for a fully-featured mcmc implementation with multiple
flavours of MCMC.
This version runs a simple implementation of an Affine Invariant MCMC (this is a
misnomer as most non-trivial Metropolis Hastings steps can be affine invariant!).
However, this MCMC flavour does require minimal tuning, and therefore is good for
testing. See Goodman & Weare (2010) for a description of the algorithm, and
Foreman-Mackey et al. (2012) for another clear description of the algorithm (and of the
widely used 'emcee' implementation).
The basic idea is that at each step, we loop through each chain in turn, and pick
another random chain, and create a proposal based on the positions of the current and
random other chain. This implementation splits the set of chains in two, and picks a
random chain from the other set. This allows the proposals to be pre-computed, and also
for the logPosteriors to be computed in parallel. See Foreman-Mackey et al. (2012) for
an explanation.
The initial set of chains are simply drawn from a diagonalised Gaussian distribution.
  Chains are replaced at random if more than dlogP from the maximum likelihood, to ensure
  the starting points are ok. If too few points lie within this range (fewer than twice
  the number of free parameters), the code will raise an exception. This is usually
  because the starting distribution is too large for one of the parameters, and/or the
  starting point is far from the max. This can usually be fixed by calling again with a
  smaller x_err. Note that the full mcmc implementation
inferno.mcmc has many more features for refining starting points and running different
flavours of MCMC.
  The total number of posterior evaluations will be N * (burnin + chain), with N * chain samples recorded.
inputs
------
f - logPosterior function, which can be called as f(x,*args), and returns the value
of the logPosterior for the parameter vector x
x - array of starting points (means) to populate initial chains
x_err - corresponding uncertainties (stdevs)
burnin - length of burnin period, where current state is not recorded
chain - length of chain, where each state of the chain is recorded
  N - number of chains/walkers; must be even and greater than 16
args - additional arguments to the logPosterior f - i.e. f(x,*args)
a - parameter used to control the acceptance ratio. This can be varied based on the
acceptance, but is fixed here. See inferno.mcmc(mode="AffInv")
dlogP - starting points for chains are rejected if more than dlogP from the maximum
likelihood computed from the initial draw. This will include points in restricted
      prior space (i.e. with f(x)=-np.inf). If too few points remain within this
      range (fewer than twice the number of free parameters), an exception is raised
returns a dictionary with the following parameters
--------------------------------------------------
'p' - means of the parameters
'e' - standard deviation of the parameters
  'chains' - array of chains of shape: (chain x N x len(x))
  'logP' - corresponding values of logP at each point in the chain, shape: (chain x N)
'Xinit' - initial points in the chain, useful for debugging, shape: (N x len(x))
'Xinit_logP' - corresponding values of logP, shape: (N)
"""
#check a few inputs are ok
assert N%2==0 and N>16, "N must be even and greater than 16"
#define simple mapping function, written in this way to allow easy parallelisation with multiprocessing
def f_args(x): return f(x,*args) #create simple wrapper function that doesn't require args
def map_func(X): return np.array(list(map(f_args,X))) #create mapping function using default map
#get starting points for the chains and compute logP for each
X = np.random.multivariate_normal(x,np.diag(x_err**2),N) #use gaussian distribution
XlogP = map_func(X) #compute logP
Xinit,Xinit_logP=np.copy(X),np.copy(XlogP)
#define arrays for chains
chains = np.empty((chain,N,x.size)) # accepted chains
logP = np.empty((chain,N)) # accepted posterior values
n_steps = burnin+chain #and total number of steps
acc = np.full((n_steps,N),False) # acceptance array, start with all False
#re-draw starting points for outliers
cull_index = XlogP.max() - XlogP > dlogP
if np.sum(~cull_index) < np.sum(x_err>0.)*2: #raise exception if number of good points is too low
raise ValueError("too many points ({}/{}) with ndim {} are outside acceptable range, use smaller x_err".format(np.sum(cull_index),len(cull_index),np.sum(x_err>0.)))
if np.any(cull_index):
print("redrawing {}/{} points".format(np.sum(cull_index),N))
ind_good = np.where(~cull_index)[0]
good_points_ind = np.random.choice(ind_good,cull_index.sum())
X[cull_index],XlogP[cull_index] = X[good_points_ind],XlogP[good_points_ind]
#predefine random arrays, for acceptance, step sizes, and random other chain
RandNoArr = np.random.rand(n_steps,N) #for acceptance step
#then z and z^D-1 used in proposal and acceptance
z = (np.random.rand(n_steps,N) * (np.sqrt(4.*a)-np.sqrt(4./a)) + np.sqrt(4./a))**2 / 4.
z_Dm1 = z**(np.sum(x_err>0.)-1)
#pick random other chain to use for each step
rand_chain = np.random.randint(0,N//2,(n_steps,N)) #first pick a random value from 0 to N//2 for each chain
rand_chain[:,:N//2]+=N//2 #then add on N//2 for the 1st set
slices = [slice(0,N//2),slice(N//2,None)]
start_time = time.time() #get start time
#compute MCMC chain
for i in tqdm(range(n_steps),position=0,desc='running mcmc chain'):
for sl in slices: #loop over each half of chains in turn
#get proposal steps and compute logP
X_prop = X[rand_chain[i,sl]] + z[i][sl,np.newaxis] * (X[sl] - X[rand_chain[i,sl]])
      XlogP_prop = map_func(X_prop) #compute logP for proposal
import math
import os
import sys
from ConfigParser import RawConfigParser
DEFAULT_SIZE = 400
BOARD_SIZE = 8
CHECKER_SIZE = 30
MAX_VALID_SQ = 32
MOVE = 0
JUMP = 1
OCCUPIED = 0
BLACK = 1
WHITE = 2
MAN = 4
KING = 8
FREE = 16
COLORS = BLACK | WHITE
TYPES = OCCUPIED | BLACK | WHITE | MAN | KING | FREE
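# For example, a square holding a black king would be encoded as BLACK | KING (== 9),
# and masking with COLORS (square & COLORS) recovers the piece colour. This is an
# illustration of the bit-flag encoding above, not code from the original program.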
HUMAN = 0
COMPUTER = 1
MIN = 0
MAX = 1
IMAGE_DIR = 'images' + os.sep
RAVEN_ICON = IMAGE_DIR + '_raven.ico'
BULLET_IMAGE = IMAGE_DIR + 'bullet_green.gif'
CROWN_IMAGE = IMAGE_DIR + 'crown.gif'
BOLD_IMAGE = IMAGE_DIR + 'text_bold.gif'
ITALIC_IMAGE = IMAGE_DIR + 'text_italic.gif'
BULLETS_IMAGE = IMAGE_DIR + 'text_list_bullets.gif'
NUMBERS_IMAGE = IMAGE_DIR + 'text_list_numbers.gif'
ADDLINK_IMAGE = IMAGE_DIR + 'link.gif'
REMLINK_IMAGE = IMAGE_DIR + 'link_break.gif'
UNDO_IMAGE = IMAGE_DIR + 'resultset_previous.gif'
UNDO_ALL_IMAGE = IMAGE_DIR + 'resultset_first.gif'
REDO_IMAGE = IMAGE_DIR + 'resultset_next.gif'
REDO_ALL_IMAGE = IMAGE_DIR + 'resultset_last.gif'
LIGHT_SQUARES = 'tan'
DARK_SQUARES = 'dark green'
OUTLINE_COLOR = 'white'
LIGHT_CHECKERS = 'white'
DARK_CHECKERS = 'red'
WHITE_CHAR = 'w'
WHITE_KING = 'W'
BLACK_CHAR = 'b'
BLACK_KING = 'B'
FREE_CHAR = '.'
OCCUPIED_CHAR = '-'
INFINITY = 9999999
MAX_DEPTH = 10
VERSION = '0.4'
TITLE = 'Raven ' + VERSION
PROGRAM_TITLE = 'Raven Checkers'
CUR_DIR = sys.path[0]
TRAINING_DIR = 'training'
# search values for transposition table
hashfALPHA, hashfBETA, hashfEXACT = range(3)
# constants for evaluation function
TURN = 2 # color to move gets + turn
BRV = 3 # multiplier for back rank
KCV = 5 # multiplier for kings in center
MCV = 1 # multiplier for men in center
MEV = 1 # multiplier for men on edge
KEV = 5 # multiplier for kings on edge
CRAMP = 5 # multiplier for cramp
OPENING = 2 # multipliers for tempo
MIDGAME = -1
ENDGAME = 2
INTACT_DOUBLE_CORNER = 3
BLACK_IDX = [5, 6]
WHITE_IDX = [-5, -6]
KING_IDX = [-6, -5, 5, 6]
FIRST = 0
MID = 1
LAST = -1
# (white)
# 45 46 47 48
# 39 40 41 42
# 34 35 36 37
# 28 29 30 31
# 23 24 25 26
# 17 18 19 20
# 12 13 14 15
# 6 7 8 9
# (black)
# other squares reachable from a particular square with a white man
WHITE_MAP = {45: {39, 40, 34, 35, 28, 29, 30, 23, 24, 25, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
46: {40, 41, 34, 35, 36, 28, 29, 30, 31, 23, 24, 25, 26, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
47: {41, 42, 35, 36, 37, 29, 30, 31, 23, 24, 25, 26, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
48: {42, 36, 37, 30, 31, 24, 25, 26, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
39: {34, 28, 29, 23, 24, 17, 18, 19, 12, 13, 14, 6, 7, 8, 9},
40: {34, 35, 28, 29, 30, 23, 24, 25, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
41: {35, 36, 29, 30, 31, 23, 24, 25, 26, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
42: {36, 37, 30, 31, 24, 25, 26, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
34: {28, 29, 23, 24, 17, 18, 19, 12, 13, 14, 6, 7, 8, 9},
35: {29, 30, 23, 24, 25, 17, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
36: {30, 31, 24, 25, 26, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
37: {31, 25, 26, 19, 20, 13, 14, 15, 7, 8, 9},
28: {23, 17, 18, 12, 13, 6, 7, 8},
29: {23, 24, 17, 18, 19, 12, 13, 14, 6, 7, 8, 9},
30: {24, 25, 18, 19, 20, 12, 13, 14, 15, 6, 7, 8, 9},
31: {25, 26, 19, 20, 13, 14, 15, 7, 8, 9},
23: {17, 18, 12, 13, 6, 7, 8},
24: {18, 19, 12, 13, 14, 6, 7, 8, 9},
25: {19, 20, 13, 14, 15, 7, 8, 9},
26: {20, 14, 15, 8, 9},
17: {12, 6, 7},
18: {12, 13, 6, 7, 8},
19: {13, 14, 7, 8, 9},
20: {14, 15, 8, 9},
12: {6, 7},
13: {7, 8},
14: {8, 9},
15: {9},
6: set(),
7: set(),
8: set(),
9: set()}
# (white)
# 45 46 47 48
# 39 40 41 42
# 34 35 36 37
# 28 29 30 31
# 23 24 25 26
# 17 18 19 20
# 12 13 14 15
# 6 7 8 9
# (black)
# other squares reachable from a particular square with a black man
BLACK_MAP = {6: {12, 17, 18, 23, 24, 28, 29, 30, 34, 35, 36, 39, 40, 41, 42, 45, 46, 47, 48},
7: {12, 13, 17, 18, 19, 23, 24, 25, 28, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
8: {13, 14, 18, 19, 20, 23, 24, 25, 26, 28, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
9: {14, 15, 19, 20, 24, 25, 26, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
12: {17, 18, 23, 24, 28, 29, 30, 34, 35, 36, 39, 40, 41, 42, 45, 46, 47, 48},
13: {18, 19, 23, 24, 25, 28, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
14: {19, 20, 24, 25, 26, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
15: {20, 25, 26, 30, 31, 35, 36, 37, 40, 41, 42, 45, 46, 47, 48},
17: {23, 28, 29, 34, 35, 39, 40, 41, 45, 46, 47},
18: {23, 24, 28, 29, 30, 34, 35, 36, 39, 40, 41, 42, 45, 46, 47, 48},
19: {24, 25, 29, 30, 31, 34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48},
20: {25, 26, 30, 31, 35, 36, 37, 40, 41, 42, 45, 46, 47, 48},
23: {28, 29, 34, 35, 39, 40, 41, 45, 46, 47},
24: {29, 30, 34, 35, 36, 39, 40, 41, 42, 45, 46, 47, 48},
25: {30, 31, 35, 36, 37, 40, 41, 42, 45, 46, 47, 48},
26: {31, 36, 37, 41, 42, 46, 47, 48},
28: {34, 39, 40, 45, 46},
29: {34, 35, 39, 40, 41, 45, 46, 47},
30: {35, 36, 40, 41, 42, 45, 46, 47, 48},
31: {36, 37, 41, 42, 46, 47, 48},
34: {39, 40, 45, 46},
35: {40, 41, 45, 46, 47},
36: {41, 42, 46, 47, 48},
37: {42, 47, 48},
39: {45},
40: {45, 46},
41: {46, 47},
42: {47, 48},
45: set(),
46: set(),
47: set(),
48: set()}
# translate from simple input notation to real checkerboard notation
IMAP = {'a1': 5, 'c1': 6, 'e1': 7, 'g1': 8,
'b2': 10, 'd2': 11, 'f2': 12, 'h2': 13,
'a3': 14, 'c3': 15, 'e3': 16, 'g3': 17,
'b4': 19, 'd4': 20, 'f4': 21, 'h4': 22,
'a5': 23, 'c5': 24, 'e5': 25, 'g5': 26,
'b6': 28, 'd6': 29, 'f6': 30, 'h6': 31,
'a7': 32, 'c7': 33, 'e7': 34, 'g7': 35,
'b8': 37, 'd8': 38, 'f8': 39, 'h8': 40}
CB_MAP = {5: 4, 6: 3, 7: 2, 8: 1,
10: 8, 11: 7, 12: 6, 13: 5,
14: 12, 15: 11, 16: 10, 17: 9,
19: 16, 20: 15, 21: 14, 22: 13,
23: 20, 24: 19, 25: 18, 26: 17,
28: 24, 29: 23, 30: 22, 31: 21,
32: 28, 33: 27, 34: 26, 35: 25,
37: 32, 38: 31, 39: 30, 40: 29}
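# Example of the two-step translation above: simple input 'a1' maps to internal
# square 5 (IMAP['a1'] == 5), which corresponds to checkerboard square 4 (CB_MAP[5] == 4).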
def create_position_map():
""" Maps compressed grid indices xi + yi * 8 to internal
board indices """
pos = {1: 45, 3: 46, 5: 47, 7: 48, 8: 39, 10: 40, 12: 41, 14: 42, 17: 34, 19: 35, 21: 36, 23: 37, 24: 28, 26: 29,
28: 30, 30: 31, 33: 23, 35: 24, 37: 25, 39: 26, 40: 17, 42: 18, 44: 19, 46: 20, 49: 12, 51: 13, 53: 14,
55: 15, 56: 6, 58: 7, 60: 8, 62: 9}
return pos
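# Usage sketch (illustrative): recover the internal board index for a compressed
# grid position, e.g. column 1 of row 0 maps to internal square 45.
#   pos = create_position_map()
#   assert pos[1 + 0 * 8] == 45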
def create_key_map():
""" Maps internal board indices to checkerboard label | |
# Owner(s): ["module: nn"]
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
# Returns a database of args & kwargs that can be used to construct each module.
# Each entry is in class -> (args, kwargs) format.
# Example: torch.nn.Linear -> ([10, 5], {})
# TODO: Merge this in with the initial ModuleInfo implementation.
def build_constructor_arg_db():
return {
torch.nn.AdaptiveAvgPool1d: ((5,), {}),
torch.nn.AdaptiveAvgPool2d: ((5,), {}),
torch.nn.AdaptiveAvgPool3d: ((5,), {}),
torch.nn.AdaptiveLogSoftmaxWithLoss: ((100, 20, [5, 10, 15]), {}),
torch.nn.AdaptiveMaxPool1d: ((5,), {}),
torch.nn.AdaptiveMaxPool2d: ((5,), {}),
torch.nn.AdaptiveMaxPool3d: ((5,), {}),
torch.nn.AlphaDropout: ((), {}),
torch.nn.AvgPool1d: ((3,), {}),
torch.nn.AvgPool2d: ((3,), {}),
torch.nn.AvgPool3d: ((3,), {}),
torch.nn.BCELoss: ((), {}),
torch.nn.BCEWithLogitsLoss: ((), {}),
torch.nn.BatchNorm1d: ((5,), {}),
torch.nn.BatchNorm2d: ((5,), {}),
torch.nn.BatchNorm3d: ((5,), {}),
torch.nn.Bilinear: ((2, 3, 4), {}),
torch.nn.CELU: ((), {}),
torch.nn.CTCLoss: ((), {}),
torch.nn.ChannelShuffle: ((4,), {}),
torch.nn.ConstantPad1d: ((2, 3.5), {}),
torch.nn.ConstantPad2d: ((2, 3.5), {}),
torch.nn.ConstantPad3d: ((2, 3.5), {}),
torch.nn.Conv1d: ((3, 3, 3), {}),
torch.nn.Conv2d: ((3, 3, 3), {}),
torch.nn.Conv3d: ((3, 3, 3), {}),
torch.nn.ConvTranspose1d: ((3, 3, 3), {}),
torch.nn.ConvTranspose2d: ((3, 3, 3), {}),
torch.nn.ConvTranspose3d: ((3, 3, 3), {}),
torch.nn.CosineEmbeddingLoss: ((), {}),
torch.nn.CosineSimilarity: ((), {}),
torch.nn.CrossEntropyLoss: ((), {}),
torch.nn.CrossMapLRN2d: ((5,), {}),
torch.nn.Dropout1d: ((), {}),
torch.nn.Dropout2d: ((), {}),
torch.nn.Dropout3d: ((), {}),
torch.nn.Dropout: ((), {}),
torch.nn.ELU: ((), {}),
torch.nn.Embedding: ((10, 5), {}),
torch.nn.EmbeddingBag: ((10, 5), {}),
torch.nn.FeatureAlphaDropout: ((), {}),
torch.nn.Flatten: ((), {}),
torch.nn.Fold: ((5, 2), {}),
torch.nn.FractionalMaxPool2d: ((5, 2), {}),
torch.nn.FractionalMaxPool3d: ((5, 2), {}),
torch.nn.GELU: ((), {}),
torch.nn.GLU: ((), {}),
torch.nn.GRU: ((5, 10), {}),
torch.nn.GRUCell: ((5, 10), {}),
torch.nn.GaussianNLLLoss: ((), {}),
torch.nn.GroupNorm: ((3, 6, 1e-5, True), {}),
torch.nn.Hardshrink: ((), {}),
torch.nn.Hardsigmoid: ((), {}),
torch.nn.Hardswish: ((), {}),
torch.nn.Hardtanh: ((), {}),
torch.nn.HingeEmbeddingLoss: ((), {}),
torch.nn.HuberLoss: ((), {}),
torch.nn.Identity: ((), {}),
torch.nn.InstanceNorm1d: ((5, 1e-5, 0.1, True), {}),
torch.nn.InstanceNorm2d: ((5, 1e-5, 0.1, True), {}),
torch.nn.InstanceNorm3d: ((5, 1e-5, 0.1, True), {}),
torch.nn.KLDivLoss: ((), {}),
torch.nn.L1Loss: ((), {}),
torch.nn.LPPool1d: ((2, 3), {}),
torch.nn.LPPool2d: ((2, 3), {}),
torch.nn.LSTM: ((5, 10), {}),
torch.nn.LSTMCell: ((5, 10), {}),
torch.nn.LayerNorm: ((2,), {}),
torch.nn.LazyBatchNorm1d: ((), {}),
torch.nn.LazyBatchNorm2d: ((), {}),
torch.nn.LazyBatchNorm3d: ((), {}),
torch.nn.LazyConv1d: ((5, 2), {}),
torch.nn.LazyConv2d: ((5, 2), {}),
torch.nn.LazyConv3d: ((5, 2), {}),
torch.nn.LazyConvTranspose1d: ((5, 2), {}),
torch.nn.LazyConvTranspose2d: ((5, 2), {}),
torch.nn.LazyConvTranspose3d: ((5, 2), {}),
torch.nn.LazyInstanceNorm1d: ((), {}),
torch.nn.LazyInstanceNorm2d: ((), {}),
torch.nn.LazyInstanceNorm3d: ((), {}),
torch.nn.LazyLinear: ((5,), {}),
torch.nn.LeakyReLU: ((), {}),
torch.nn.Linear: ((10, 5), {}),
torch.nn.LocalResponseNorm: ((2,), {}),
torch.nn.LogSigmoid: ((), {}),
torch.nn.LogSoftmax: ((), {}),
torch.nn.MSELoss: ((), {}),
torch.nn.MarginRankingLoss: ((), {}),
torch.nn.MaxPool1d: ((3,), {}),
torch.nn.MaxPool2d: ((3,), {}),
torch.nn.MaxPool3d: ((3,), {}),
torch.nn.MaxUnpool1d: ((5,), {}),
torch.nn.MaxUnpool2d: ((5,), {}),
torch.nn.MaxUnpool3d: ((5,), {}),
torch.nn.Mish: ((), {}),
torch.nn.ModuleDict: ((), {}),
torch.nn.ModuleList: ((), {}),
torch.nn.MultiLabelMarginLoss: ((), {}),
torch.nn.MultiLabelSoftMarginLoss: ((), {}),
torch.nn.MultiMarginLoss: ((), {}),
torch.nn.MultiheadAttention: ((100, 2), {}),
torch.nn.NLLLoss2d: ((), {}),
torch.nn.NLLLoss: ((), {}),
torch.nn.PReLU: ((), {}),
torch.nn.PairwiseDistance: ((), {}),
torch.nn.ParameterDict: ((), {}),
torch.nn.ParameterList: ((), {}),
torch.nn.PixelShuffle: ((2,), {}),
torch.nn.PixelUnshuffle: ((2,), {}),
torch.nn.PoissonNLLLoss: ((), {}),
torch.nn.RNN: ((5, 10), {}),
torch.nn.RNNBase: (('LSTM', 5, 10), {}),
torch.nn.RNNCell: ((5, 10), {}),
torch.nn.RNNCellBase: ((5, 10, True, 2), {}),
torch.nn.RReLU: ((), {}),
torch.nn.ReLU6: ((), {}),
torch.nn.ReLU: ((), {}),
torch.nn.ReflectionPad1d: ((2,), {}),
torch.nn.ReflectionPad2d: ((2,), {}),
torch.nn.ReflectionPad3d: ((2,), {}),
torch.nn.ReplicationPad1d: ((2,), {}),
torch.nn.ReplicationPad2d: ((2,), {}),
torch.nn.ReplicationPad3d: ((2,), {}),
torch.nn.SELU: ((), {}),
torch.nn.Sequential: ((), {}),
torch.nn.SiLU: ((), {}),
torch.nn.Sigmoid: ((), {}),
torch.nn.SmoothL1Loss: ((), {}),
torch.nn.SoftMarginLoss: ((), {}),
torch.nn.Softmax2d: ((), {}),
torch.nn.Softmax: ((), {}),
torch.nn.Softmin: ((), {}),
torch.nn.Softplus: ((), {}),
torch.nn.Softshrink: ((), {}),
torch.nn.Softsign: ((), {}),
torch.nn.SyncBatchNorm: ((5,), {}),
torch.nn.Tanh: ((), {}),
torch.nn.Tanhshrink: ((), {}),
torch.nn.Threshold: ((0.1, 20), {}),
torch.nn.Transformer: ((), {}),
torch.nn.TransformerDecoder: ((torch.nn.TransformerDecoderLayer, 3), {}),
torch.nn.TransformerDecoderLayer: ((10, 2), {}),
torch.nn.TransformerEncoder: ((torch.nn.TransformerEncoderLayer, 3), {}),
torch.nn.TransformerEncoderLayer: ((10, 2), {}),
torch.nn.TripletMarginLoss: ((), {}),
torch.nn.TripletMarginWithDistanceLoss: ((), {}),
torch.nn.Unflatten: ((1, (2, 5, 5)), {}),
torch.nn.Unfold: ((3,), {}),
torch.nn.Upsample: ((), {}),
torch.nn.UpsamplingBilinear2d: ((), {}),
torch.nn.UpsamplingNearest2d: ((), {}),
torch.nn.ZeroPad2d: ((0,), {}),
torch.nn.qat.Conv1d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.nn.qat.Conv2d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.nn.qat.Conv3d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.nn.qat.Linear: ((5, 2), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.nn.qat.Embedding: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.nn.qat.EmbeddingBag: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.nn.quantizable.LSTM: ((5, 6), {}),
torch.nn.quantizable.LSTMCell: ((5, 6), {}),
torch.nn.quantizable.MultiheadAttention: ((10, 2), {}),
torch.nn.quantized.BatchNorm2d: ((2,), {}),
torch.nn.quantized.BatchNorm3d: ((2,), {}),
torch.nn.quantized.Dropout: ((), {}),
torch.nn.quantized.Conv1d: ((3, 3, 3), {}),
torch.nn.quantized.Conv2d: ((3, 3, 3), {}),
torch.nn.quantized.Conv3d: ((3, 3, 3), {}),
torch.nn.quantized.ConvTranspose1d: ((3, 3, 3), {}),
torch.nn.quantized.ConvTranspose2d: ((3, 3, 3), {}),
torch.nn.quantized.ConvTranspose3d: ((16, 33, (3, 3, 5)), {
'stride': (2, 1, 1),
'padding': (4, 2, 2),
'output_padding': (2, 2, 2),
'dilation': (1, 1, 1),
}),
torch.nn.quantized.DeQuantize: ((), {}),
torch.nn.quantized.ELU: ((0.01, 0), {}),
torch.nn.quantized.Embedding: ((10, 3), {
'factory_kwargs': {},
}),
torch.nn.quantized.EmbeddingBag: ((10, 3), {
'factory_kwargs': {},
}),
torch.nn.quantized.GroupNorm: ((2, 4, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.nn.quantized.Hardswish: ((0.1, 0,), {}),
torch.nn.quantized.InstanceNorm1d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.nn.quantized.InstanceNorm2d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.nn.quantized.InstanceNorm3d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.nn.quantized.LayerNorm: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.nn.quantized.LeakyReLU: ((0.01, 0), {}),
torch.nn.quantized.Linear: ((5, 2), {
'factory_kwargs': {},
}),
torch.nn.quantized.MaxPool2d: ((3,), {}),
torch.nn.quantized.Quantize: ((0.1, 0), {
'dtype': torch.int16,
'factory_kwargs': {},
}),
torch.nn.quantized.ReLU6: ((), {}),
torch.nn.quantized.Sigmoid: ((0.1, 0), {}),
torch.nn.quantized.Softmax: ((), {}),
torch.nn.quantized.FloatFunctional: ((), {}),
torch.nn.quantized.FXFloatFunctional: ((), {}),
torch.nn.quantized.QFunctional: ((), {}),
}
# Instantiates the given class with the given args, kwargs, optionally on a given device.
def instantiate_class(cls, args, kwargs, extra_kwargs):
return cls(*args, **kwargs) if extra_kwargs is None else cls(*args, **kwargs, **extra_kwargs)
# Returns a function that calls the real implementation of a method
# in addition to passing args to a mock object.
def mock_wrapper(method):
mock = MagicMock()
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock
return wrapper
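# Illustrative use of mock_wrapper (mirroring how it is applied further below):
#   parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
#   with patch.object(torch.nn.Parameter, '__new__', parameter_new):
#       torch.nn.Linear(10, 5)
#   parameter_new.mock.assert_called()  # Linear creates weight/bias Parameters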
# Returns a set of args / kwargs that can be used to construct the module.
def get_example_args(module_cls, constructor_arg_db, extra_kwargs=None):
assert module_cls in constructor_arg_db, \
f"No entry for {module_cls} in the constructor arg DB. Please add it to pass these tests."
args, kwargs = constructor_arg_db[module_cls]
extra_kwargs = {} if extra_kwargs is None else extra_kwargs
# Recursively instantiate args / kwargs that are class objects.
args = [instantiate_class(arg, *get_example_args(arg, constructor_arg_db), extra_kwargs=extra_kwargs)
if inspect.isclass(arg) else torch.nn.Parameter(arg.to(**extra_kwargs))
if isinstance(arg, torch.nn.Parameter) else arg for arg in args]
kwargs = {k: instantiate_class(v, *get_example_args(v, constructor_arg_db), extra_kwargs=extra_kwargs)
              if inspect.isclass(v) else torch.nn.Parameter(v.to(**extra_kwargs))
if isinstance(v, torch.nn.Parameter) else v for k, v in kwargs.items()}
kwargs.update(extra_kwargs)
return args, kwargs
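# Example (illustrative): construct a module on a given device/dtype via the DB.
# torch.nn.Linear accepts device/dtype factory kwargs, so this should work as-is.
#   args, kwargs = get_example_args(torch.nn.Linear, build_constructor_arg_db(),
#                                   extra_kwargs={'device': 'cpu', 'dtype': torch.float64})
#   m = torch.nn.Linear(*args, **kwargs)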
def generate_test_func(test_cls, module_cls, constructor_arg_db,
verify_kwargs=True, module_is_lazy=False, check_nonexistent_arg=True):
# Generate a function for testing the given module.
@dtypes(*floating_types())
def run_test(test_cls, device, dtype, module_cls=module_cls):
# Check if this module creates parameters or registers buffers.
# The mock magic here passes through to the real Parameter / register_buffer
# logic and is only used to check for calls.
args, kwargs = get_example_args(module_cls, constructor_arg_db)
# Some modules need to pass factory_kwargs so as not to conflict with existing args such as dtype.
module_needs_factory_kwargs = 'factory_kwargs' in kwargs
if module_needs_factory_kwargs:
del kwargs['factory_kwargs']
extra_kwargs = {
'factory_kwargs': {
'device': device,
'dtype': dtype,
}
}
else:
extra_kwargs = {
'device': device,
'dtype': dtype,
}
parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
with patch.object(torch.nn.Parameter, '__new__', parameter_new):
register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
m = module_cls(*args, **kwargs)
module_creates_params_or_buffers = parameter_new.mock.called or register_buffer.mock.called
# == Verify factory kwargs are supported. ==
if verify_kwargs and module_creates_params_or_buffers:
args, kwargs = get_example_args(module_cls, constructor_arg_db,
extra_kwargs=extra_kwargs)
if module_is_lazy:
# Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
m = module_cls(*args, **kwargs)
uninit_param_new.mock.assert_has_calls(
[mock.call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
uninit_buffer_new.mock.assert_has_calls(
[mock.call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
else:
# Check device placement and dtype for parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg applies to.
# Note that dtype verification is also skipped if the module requires factory_kwargs.
m = module_cls(*args, **kwargs)
for name, param in m.named_parameters():
test_cls.assertEqual(
str(param.device), device,
f'Parameter {name} is on {param.device.type} instead of the expected device {device}')
if param.dtype.is_floating_point and not module_needs_factory_kwargs:
test_cls.assertEqual(
param.dtype, dtype,
f'Parameter {name} is of dtype {param.dtype} instead of the expected dtype {dtype}')
for name, buffer in m.named_buffers():
test_cls.assertEqual(
str(buffer.device), device,
f'Buffer {name} is on {buffer.device.type} instead of the expected device {device}')
                if buffer.dtype.is_floating_point and not module_needs_factory_kwargs:
                    test_cls.assertEqual(
                        buffer.dtype, dtype,
                        f'Buffer {name} is of dtype {buffer.dtype} instead of the expected dtype {dtype}')